-rw-r--r--.mailmap3
-rw-r--r--Documentation/admin-guide/laptops/thinkpad-acpi.rst10
-rw-r--r--Documentation/admin-guide/mm/transhuge.rst2
-rw-r--r--Documentation/admin-guide/pm/amd-pstate.rst4
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml10
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml2
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/fsl/fsl,qman-portal.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/adi,ssm2518.yaml20
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml31
-rw-r--r--Documentation/devicetree/bindings/sound/awinic,aw88395.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/everest,es71x4.yaml10
-rw-r--r--Documentation/devicetree/bindings/sound/everest,es7241.yaml19
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,easrc.yaml32
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,micfil.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,mqs.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,xcvr.yaml34
-rw-r--r--Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml32
-rw-r--r--Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml16
-rw-r--r--Documentation/devicetree/bindings/sound/neofidelity,ntp8918.yaml26
-rw-r--r--Documentation/devicetree/bindings/sound/realtek,rt5682.yaml156
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml19
-rw-r--r--Documentation/devicetree/bindings/sound/rt5682.txt98
-rw-r--r--Documentation/devicetree/bindings/sound/ti,pcm6240.yaml32
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas2562.yaml30
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas2770.yaml34
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas2781.yaml38
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas27xx.yaml34
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas57xx.yaml36
-rw-r--r--Documentation/mm/process_addrs.rst850
-rw-r--r--Documentation/netlink/specs/mptcp_pm.yaml60
-rw-r--r--MAINTAINERS20
-rw-r--r--Makefile2
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/include/asm/cachetype.h8
-rw-r--r--arch/arm/mach-imx/Kconfig1
-rw-r--r--arch/arm64/boot/dts/arm/fvp-base-revc.dts2
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2712.dtsi8
-rw-r--r--arch/arm64/kernel/signal.c35
-rw-r--r--arch/hexagon/Makefile6
-rw-r--r--arch/nios2/kernel/cpuinfo.c10
-rw-r--r--arch/powerpc/configs/pmac32_defconfig1
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/platforms/book3s/vas-api.c36
-rw-r--r--arch/s390/boot/startup.c2
-rw-r--r--arch/s390/boot/vmem.c6
-rw-r--r--arch/s390/kernel/ipl.c2
-rw-r--r--arch/x86/events/intel/core.c12
-rw-r--r--arch/x86/events/intel/ds.c1
-rw-r--r--arch/x86/events/intel/uncore.c1
-rw-r--r--arch/x86/include/asm/cpufeatures.h1
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/static_call.h15
-rw-r--r--arch/x86/include/asm/sync_core.h6
-rw-r--r--arch/x86/include/asm/xen/hypercall.h36
-rw-r--r--arch/x86/kernel/callthunks.c5
-rw-r--r--arch/x86/kernel/cet.c30
-rw-r--r--arch/x86/kernel/cpu/common.c38
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c58
-rw-r--r--arch/x86/kernel/static_call.c9
-rw-r--r--arch/x86/kernel/vmlinux.lds.S4
-rw-r--r--arch/x86/kvm/mmu/mmu.c12
-rw-r--r--arch/x86/kvm/mmu/spte.h17
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c5
-rw-r--r--arch/x86/kvm/svm/avic.c6
-rw-r--r--arch/x86/kvm/svm/svm.c9
-rw-r--r--arch/x86/kvm/vmx/posted_intr.h2
-rw-r--r--arch/x86/kvm/x86.c9
-rw-r--r--arch/x86/xen/enlighten.c65
-rw-r--r--arch/x86/xen/enlighten_hvm.c13
-rw-r--r--arch/x86/xen/enlighten_pv.c4
-rw-r--r--arch/x86/xen/enlighten_pvh.c7
-rw-r--r--arch/x86/xen/xen-asm.S50
-rw-r--r--arch/x86/xen/xen-head.S107
-rw-r--r--arch/x86/xen/xen-ops.h9
-rw-r--r--block/bdev.c3
-rw-r--r--block/blk-mq-sysfs.c16
-rw-r--r--block/blk-mq.c40
-rw-r--r--block/blk-sysfs.c4
-rw-r--r--drivers/accel/ivpu/ivpu_gem.c2
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.c10
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c2
-rw-r--r--drivers/acpi/Kconfig4
-rw-r--r--drivers/auxdisplay/Kconfig2
-rw-r--r--drivers/block/ublk_drv.c26
-rw-r--r--drivers/block/zram/zram_drv.c15
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/clk/imx/clk-imx8mp-audiomix.c3
-rw-r--r--drivers/clk/thead/clk-th1520-ap.c13
-rw-r--r--drivers/clocksource/hyperv_timer.c14
-rw-r--r--drivers/cpufreq/amd-pstate.c50
-rw-r--r--drivers/cxl/core/region.c25
-rw-r--r--drivers/cxl/pci.c6
-rw-r--r--drivers/dma-buf/dma-buf.c2
-rw-r--r--drivers/dma-buf/udmabuf.c43
-rw-r--r--drivers/dma/amd/qdma/qdma.c28
-rw-r--r--drivers/dma/apple-admac.c7
-rw-r--r--drivers/dma/at_xdmac.c2
-rw-r--r--drivers/dma/dw/acpi.c6
-rw-r--r--drivers/dma/dw/internal.h8
-rw-r--r--drivers/dma/dw/pci.c4
-rw-r--r--drivers/dma/fsl-edma-common.h1
-rw-r--r--drivers/dma/fsl-edma-main.c41
-rw-r--r--drivers/dma/loongson2-apb-dma.c2
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/dma/tegra186-gpc-dma.c10
-rw-r--r--drivers/firmware/arm_ffa/bus.c15
-rw-r--r--drivers/firmware/arm_ffa/driver.c7
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/Kconfig1
-rw-r--r--drivers/firmware/cirrus/Kconfig20
-rw-r--r--drivers/firmware/cirrus/Makefile2
-rw-r--r--drivers/firmware/cirrus/test/Makefile23
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_bin.c199
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c752
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c367
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_utils.c13
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c473
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_bin.c2556
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c600
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c688
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c3282
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c1851
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c2669
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c2211
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c1347
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_tests.c14
-rw-r--r--drivers/firmware/imx/Kconfig1
-rw-r--r--drivers/firmware/microchip/mpfs-auto-update.c4
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c14
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c10
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c4
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c10
-rw-r--r--drivers/gpu/drm/drm_modes.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c41
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83102.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c1
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c3
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c12
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c15
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c2
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c134
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c5
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h2
-rw-r--r--drivers/hv/hv_balloon.c9
-rw-r--r--drivers/hv/hv_kvp.c10
-rw-r--r--drivers/hv/hv_snapshot.c9
-rw-r--r--drivers/hv/hv_util.c13
-rw-r--r--drivers/hv/hyperv_vmbus.h2
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/tmp513.c10
-rw-r--r--drivers/i2c/busses/i2c-imx.c9
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c122
-rw-r--r--drivers/infiniband/core/cma.c16
-rw-r--r--drivers/infiniband/core/nldev.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c16
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c50
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c8
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c79
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c18
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c43
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c5
-rw-r--r--drivers/infiniband/hw/mlx5/main.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c23
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c24
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c26
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h11
-rw-r--r--drivers/infiniband/sw/siw/siw.h7
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c27
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c15
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c35
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c2
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/media/dvb-frontends/dib3000mb.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c3
-rw-r--r--drivers/mmc/host/mtk-sd.c2
-rw-r--r--drivers/mmc/host/sdhci-msm.c16
-rw-r--r--drivers/mmc/host/sdhci-tegra.c1
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c11
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c4
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c2
-rw-r--r--drivers/mtd/nand/raw/omap2.c16
-rw-r--r--drivers/net/can/m_can/m_can.c36
-rw-r--r--drivers/net/can/m_can/m_can.h1
-rw-r--r--drivers/net/can/m_can/m_can_pci.c1
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c47
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h4
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c62
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h9
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c21
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c5
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c5
-rw-r--r--drivers/net/ethernet/google/gve/gve.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c63
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c46
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c2
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c29
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h8
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c5
-rw-r--r--drivers/net/ethernet/marvell/sky2.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c2
-rw-r--r--drivers/net/ethernet/oa_tc6.c11
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c1
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c74
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h13
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c43
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c2
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c8
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c25
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c41
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h1
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c261
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h5
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c24
-rw-r--r--drivers/net/mdio/fwnode_mdio.c13
-rw-r--r--drivers/net/netdevsim/health.c2
-rw-r--r--drivers/net/netdevsim/netdev.c4
-rw-r--r--drivers/net/phy/aquantia/aquantia_leds.c2
-rw-r--r--drivers/net/phy/intel-xway.c2
-rw-r--r--drivers/net/phy/micrel.c114
-rw-r--r--drivers/net/phy/mxl-gpy.c2
-rw-r--r--drivers/net/pse-pd/tps23881.c16
-rw-r--r--drivers/net/team/team_core.c8
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c37
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c26
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h5
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/nvme/host/core.c2
-rw-r--r--drivers/nvme/host/nvme.h5
-rw-r--r--drivers/nvme/host/pci.c9
-rw-r--r--drivers/nvme/host/tcp.c18
-rw-r--r--drivers/nvme/target/admin-cmd.c9
-rw-r--r--drivers/nvme/target/configfs.c23
-rw-r--r--drivers/nvme/target/core.c108
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c2
-rw-r--r--drivers/nvme/target/nvmet.h7
-rw-r--r--drivers/nvme/target/pr.c8
-rw-r--r--drivers/of/address.c5
-rw-r--r--drivers/of/base.c18
-rw-r--r--drivers/of/empty_root.dts9
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/property.c2
-rw-r--r--drivers/of/unittest-data/tests-address.dtsi2
-rw-r--r--drivers/of/unittest.c39
-rw-r--r--drivers/pci/msi/irqdomain.c7
-rw-r--r--drivers/pci/msi/msi.c4
-rw-r--r--drivers/pci/pci.c6
-rw-r--r--drivers/pci/pcie/portdrv.c4
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c6
-rw-r--r--drivers/phy/freescale/phy-fsl-samsung-hdmi.c3
-rw-r--r--drivers/phy/mediatek/Kconfig1
-rw-r--r--drivers/phy/phy-core.c21
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c3
-rw-r--r--drivers/phy/st/phy-stm32-combophy.c21
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c6
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c4
-rw-r--r--drivers/platform/loongarch/Kconfig2
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c24
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c4
-rw-r--r--drivers/platform/x86/intel/ifs/core.c1
-rw-r--r--drivers/platform/x86/intel/vsec.c2
-rw-r--r--drivers/platform/x86/mlx-platform.c2
-rw-r--r--drivers/platform/x86/p2sb.c77
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c4
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c26
-rw-r--r--drivers/pmdomain/core.c6
-rw-r--r--drivers/pmdomain/imx/gpcv2.c4
-rw-r--r--drivers/power/supply/bq24190_charger.c12
-rw-r--r--drivers/power/supply/cros_charge-control.c36
-rw-r--r--drivers/power/supply/gpio-charger.c8
-rw-r--r--drivers/pwm/pwm-stm32.c2
-rw-r--r--drivers/regulator/of_regulator.c2
-rw-r--r--drivers/spi/spi-rockchip-sfc.c4
-rw-r--r--drivers/staging/fbtft/Kconfig1
-rw-r--r--drivers/staging/gpib/common/Makefile2
-rw-r--r--drivers/staging/gpib/nec7210/Makefile2
-rw-r--r--drivers/thermal/thermal_thresholds.c68
-rw-r--r--drivers/thunderbolt/nhi.c8
-rw-r--r--drivers/thunderbolt/nhi.h4
-rw-r--r--drivers/thunderbolt/retimer.c19
-rw-r--r--drivers/thunderbolt/tb.c41
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-ring.c2
-rw-r--r--drivers/usb/serial/option.c27
-rw-r--r--drivers/video/fbdev/Kconfig18
-rw-r--r--drivers/video/fbdev/core/Kconfig3
-rw-r--r--drivers/virt/coco/tdx-guest/tdx-guest.c4
-rw-r--r--drivers/watchdog/stm32_iwdg.c2
-rw-r--r--fs/btrfs/bio.c16
-rw-r--r--fs/btrfs/ctree.c11
-rw-r--r--fs/btrfs/ctree.h19
-rw-r--r--fs/btrfs/extent-tree.c6
-rw-r--r--fs/btrfs/inode.c154
-rw-r--r--fs/btrfs/qgroup.c3
-rw-r--r--fs/btrfs/relocation.c6
-rw-r--r--fs/btrfs/send.c6
-rw-r--r--fs/btrfs/sysfs.c6
-rw-r--r--fs/btrfs/tree-checker.c27
-rw-r--r--fs/ceph/file.c77
-rw-r--r--fs/ceph/mds_client.c9
-rw-r--r--fs/ceph/super.c2
-rw-r--r--fs/erofs/data.c36
-rw-r--r--fs/erofs/fileio.c9
-rw-r--r--fs/erofs/fscache.c10
-rw-r--r--fs/erofs/internal.h15
-rw-r--r--fs/erofs/super.c78
-rw-r--r--fs/erofs/zdata.c4
-rw-r--r--fs/erofs/zutil.c7
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/nfs/pnfs.c2
-rw-r--r--fs/nfs/super.c1
-rw-r--r--fs/nfsd/export.c31
-rw-r--r--fs/nfsd/export.h4
-rw-r--r--fs/nfsd/nfs4callback.c4
-rw-r--r--fs/nfsd/nfs4proc.c13
-rw-r--r--fs/nilfs2/btnode.c1
-rw-r--r--fs/nilfs2/gcinode.c2
-rw-r--r--fs/nilfs2/inode.c13
-rw-r--r--fs/nilfs2/namei.c5
-rw-r--r--fs/nilfs2/nilfs.h1
-rw-r--r--fs/ocfs2/localalloc.c27
-rw-r--r--fs/ocfs2/quota_global.c2
-rw-r--r--fs/ocfs2/quota_local.c1
-rw-r--r--fs/proc/task_mmu.c2
-rw-r--r--fs/smb/client/Kconfig1
-rw-r--r--fs/smb/client/cifsfs.c2
-rw-r--r--fs/smb/client/cifsproto.h2
-rw-r--r--fs/smb/client/connect.c36
-rw-r--r--fs/smb/client/file.c6
-rw-r--r--fs/smb/client/sess.c25
-rw-r--r--fs/smb/client/smb2pdu.c5
-rw-r--r--fs/smb/server/connection.c18
-rw-r--r--fs/smb/server/connection.h1
-rw-r--r--fs/smb/server/server.c7
-rw-r--r--fs/smb/server/server.h1
-rw-r--r--fs/smb/server/smb2pdu.c2
-rw-r--r--fs/smb/server/transport_ipc.c5
-rw-r--r--include/clocksource/hyperv_timer.h2
-rw-r--r--include/dt-bindings/sound/qcom,wcd9335.h1
-rw-r--r--include/linux/alloc_tag.h9
-rw-r--r--include/linux/arm_ffa.h13
-rw-r--r--include/linux/cacheinfo.h6
-rw-r--r--include/linux/compiler.h37
-rw-r--r--include/linux/dmaengine.h13
-rw-r--r--include/linux/firmware/cirrus/cs_dsp_test_utils.h160
-rw-r--r--include/linux/fortify-string.h14
-rw-r--r--include/linux/highmem.h8
-rw-r--r--include/linux/hyperv.h1
-rw-r--r--include/linux/if_vlan.h16
-rw-r--r--include/linux/io_uring.h4
-rw-r--r--include/linux/io_uring_types.h2
-rw-r--r--include/linux/memfd.h14
-rw-r--r--include/linux/mlx5/driver.h7
-rw-r--r--include/linux/mlx5/mlx5_ifc.h4
-rw-r--r--include/linux/mm.h90
-rw-r--r--include/linux/mm_types.h30
-rw-r--r--include/linux/page-flags.h12
-rw-r--r--include/linux/percpu-defs.h5
-rw-r--r--include/linux/platform_data/amd_qdma.h2
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/skmsg.h11
-rw-r--r--include/linux/static_call.h6
-rw-r--r--include/linux/trace_events.h8
-rw-r--r--include/linux/vermagic.h6
-rw-r--r--include/linux/vmstat.h2
-rw-r--r--include/net/netfilter/nf_tables.h7
-rw-r--r--include/net/sock.h10
-rw-r--r--include/sound/sdca.h7
-rw-r--r--include/sound/sdca_function.h3
-rw-r--r--include/sound/simple_card_utils.h8
-rw-r--r--include/sound/soc-dai.h3
-rw-r--r--include/sound/soc.h12
-rw-r--r--include/sound/soc_sdw_utils.h2
-rw-r--r--include/uapi/linux/mptcp_pm.h50
-rw-r--r--include/uapi/linux/stddef.h13
-rw-r--r--include/uapi/linux/thermal.h4
-rw-r--r--include/uapi/sound/compress_params.h23
-rw-r--r--include/uapi/sound/sof/tokens.h2
-rw-r--r--io_uring/io_uring.c17
-rw-r--r--io_uring/kbuf.c4
-rw-r--r--io_uring/net.c1
-rw-r--r--io_uring/register.c3
-rw-r--r--io_uring/rw.c2
-rw-r--r--io_uring/sqpoll.c6
-rw-r--r--io_uring/timeout.c85
-rw-r--r--kernel/bpf/verifier.c6
-rw-r--r--kernel/fork.c13
-rw-r--r--kernel/kcov.c2
-rw-r--r--kernel/locking/rtmutex.c18
-rw-r--r--kernel/locking/rtmutex_api.c2
-rw-r--r--kernel/sched/ext.c4
-rw-r--r--kernel/static_call_inline.c2
-rw-r--r--kernel/trace/fgraph.c10
-rw-r--r--kernel/trace/ftrace.c8
-rw-r--r--kernel/trace/ring_buffer.c6
-rw-r--r--kernel/trace/trace.c267
-rw-r--r--kernel/trace/trace.h6
-rw-r--r--kernel/trace/trace_events.c237
-rw-r--r--kernel/trace/trace_functions.c3
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--kernel/trace/trace_output.c6
-rw-r--r--kernel/workqueue.c23
-rw-r--r--lib/alloc_tag.c41
-rw-r--r--lib/maple_tree.c1
-rw-r--r--mm/damon/core.c10
-rw-r--r--mm/filemap.c9
-rw-r--r--mm/huge_memory.c19
-rw-r--r--mm/hugetlb.c21
-rw-r--r--mm/internal.h12
-rw-r--r--mm/khugepaged.c3
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/list_lru.c2
-rw-r--r--mm/memfd.c2
-rw-r--r--mm/memory.c18
-rw-r--r--mm/mmap.c6
-rw-r--r--mm/page_alloc.c6
-rw-r--r--mm/pgtable-generic.c2
-rw-r--r--mm/readahead.c6
-rw-r--r--mm/shmem.c29
-rw-r--r--mm/util.c7
-rw-r--r--mm/vma.c5
-rw-r--r--mm/vmalloc.c6
-rw-r--r--mm/vmscan.c9
-rw-r--r--mm/vmstat.c3
-rw-r--r--mm/zswap.c19
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/core/dev.c4
-rw-r--r--net/core/filter.c21
-rw-r--r--net/core/netdev-genl.c25
-rw-r--r--net/core/rtnetlink.c5
-rw-r--r--net/core/skmsg.c11
-rw-r--r--net/core/sock.c5
-rw-r--r--net/dsa/tag.h16
-rw-r--r--net/ipv4/ip_tunnel.c6
-rw-r--r--net/ipv4/tcp_bpf.c14
-rw-r--r--net/ipv4/tcp_input.c1
-rw-r--r--net/ipv6/ila/ila_xlat.c16
-rw-r--r--net/llc/llc_input.c2
-rw-r--r--net/mctp/route.c36
-rw-r--r--net/mctp/test/route-test.c86
-rw-r--r--net/mptcp/options.c7
-rw-r--r--net/mptcp/protocol.c23
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c4
-rw-r--r--net/netrom/nr_route.c6
-rw-r--r--net/packet/af_packet.c28
-rw-r--r--net/psample/psample.c9
-rw-r--r--net/sctp/associola.c3
-rw-r--r--net/smc/af_smc.c18
-rw-r--r--net/smc/smc_clc.c17
-rw-r--r--net/smc/smc_clc.h22
-rw-r--r--net/smc/smc_core.c9
-rw-r--r--rust/kernel/net/phy.rs4
-rw-r--r--rust/kernel/workqueue.rs18
-rwxr-xr-xscripts/mksysmap4
-rw-r--r--scripts/mod/file2alias.c36
-rw-r--r--scripts/mod/modpost.c41
-rw-r--r--scripts/mod/modpost.h17
-rw-r--r--scripts/package/PKGBUILD2
-rwxr-xr-xscripts/package/builddeb6
-rwxr-xr-xscripts/package/mkdebian7
-rw-r--r--scripts/sorttable.h5
-rw-r--r--security/selinux/ss/services.c8
-rw-r--r--sound/soc/amd/Kconfig2
-rw-r--r--sound/soc/amd/ps/pci-ps.c16
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/Makefile8
-rw-r--r--sound/soc/codecs/ad193x-i2c.c3
-rw-r--r--sound/soc/codecs/adau1761-i2c.c5
-rw-r--r--sound/soc/codecs/adau1781-i2c.c5
-rw-r--r--sound/soc/codecs/adau1977-i2c.c5
-rw-r--r--sound/soc/codecs/alc5623.c10
-rw-r--r--sound/soc/codecs/alc5632.c6
-rw-r--r--sound/soc/codecs/aw88081.c333
-rw-r--r--sound/soc/codecs/aw88081.h43
-rw-r--r--sound/soc/codecs/cs35l56.c8
-rw-r--r--sound/soc/codecs/cs42l43.c2
-rw-r--r--sound/soc/codecs/cs42l51-i2c.c6
-rw-r--r--sound/soc/codecs/cs42l84.c2
-rw-r--r--sound/soc/codecs/es8323.c2
-rw-r--r--sound/soc/codecs/madera.c7
-rw-r--r--sound/soc/codecs/max98088.c4
-rw-r--r--sound/soc/codecs/max98090.c18
-rw-r--r--sound/soc/codecs/max98095.c4
-rw-r--r--sound/soc/codecs/ntp8835.c2
-rw-r--r--sound/soc/codecs/ntp8918.c2
-rw-r--r--sound/soc/codecs/pcm186x-i2c.c3
-rw-r--r--sound/soc/codecs/pcm6240.c3
-rw-r--r--sound/soc/codecs/peb2466.c3
-rw-r--r--sound/soc/codecs/rt5682-i2c.c6
-rw-r--r--sound/soc/codecs/rt5682.c12
-rw-r--r--sound/soc/codecs/rt5682.h2
-rw-r--r--sound/soc/codecs/rt715-sdw.c41
-rw-r--r--sound/soc/codecs/rt715.h3
-rw-r--r--sound/soc/codecs/sma1307.c4
-rw-r--r--sound/soc/codecs/ssm2602-i2c.c5
-rw-r--r--sound/soc/codecs/tas2562.c4
-rw-r--r--sound/soc/codecs/tas2781-i2c.c71
-rw-r--r--sound/soc/codecs/tas5720.c10
-rw-r--r--sound/soc/codecs/tlv320adc3xxx.c4
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c6
-rw-r--r--sound/soc/codecs/tlv320aic3x-i2c.c3
-rw-r--r--sound/soc/codecs/tpa6130a2.c4
-rw-r--r--sound/soc/codecs/uda1342.c2
-rw-r--r--sound/soc/codecs/wcd9335.c2
-rw-r--r--sound/soc/codecs/wm8904.c13
-rw-r--r--sound/soc/codecs/wm8985.c4
-rw-r--r--sound/soc/fsl/Kconfig4
-rw-r--r--sound/soc/fsl/Makefile2
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c2
-rw-r--r--sound/soc/fsl/fsl_asrc.c179
-rw-r--r--sound/soc/fsl/fsl_asrc.h2
-rw-r--r--sound/soc/fsl/fsl_asrc_common.h70
-rw-r--r--sound/soc/fsl/fsl_asrc_m2m.c727
-rw-r--r--sound/soc/fsl/fsl_easrc.c261
-rw-r--r--sound/soc/fsl/fsl_easrc.h4
-rw-r--r--sound/soc/fsl/fsl_micfil.c131
-rw-r--r--sound/soc/fsl/fsl_micfil.h2
-rw-r--r--sound/soc/fsl/fsl_mqs.c28
-rw-r--r--sound/soc/fsl/fsl_sai.c7
-rw-r--r--sound/soc/fsl/fsl_sai.h3
-rw-r--r--sound/soc/fsl/fsl_utils.c45
-rw-r--r--sound/soc/fsl/fsl_utils.h5
-rw-r--r--sound/soc/fsl/fsl_xcvr.c404
-rw-r--r--sound/soc/fsl/fsl_xcvr.h13
-rw-r--r--sound/soc/fsl/imx-audmux.c2
-rw-r--r--sound/soc/fsl/imx-card.c2
-rw-r--r--sound/soc/fsl/imx-rpmsg.c2
-rw-r--r--sound/soc/generic/audio-graph-card.c48
-rw-r--r--sound/soc/generic/audio-graph-card2.c262
-rw-r--r--sound/soc/generic/simple-card-utils.c79
-rw-r--r--sound/soc/generic/simple-card.c58
-rw-r--r--sound/soc/intel/avs/apl.c2
-rw-r--r--sound/soc/intel/avs/core.c24
-rw-r--r--sound/soc/intel/avs/debugfs.c1
-rw-r--r--sound/soc/intel/avs/ipc.c25
-rw-r--r--sound/soc/intel/avs/loader.c36
-rw-r--r--sound/soc/intel/avs/messages.c22
-rw-r--r--sound/soc/intel/avs/messages.h3
-rw-r--r--sound/soc/intel/avs/pcm.c5
-rw-r--r--sound/soc/intel/avs/registers.h2
-rw-r--r--sound/soc/intel/avs/topology.c4
-rw-r--r--sound/soc/intel/avs/trace.h38
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c2
-rw-r--r--sound/soc/intel/boards/sof_sdw.c33
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-arl-match.c45
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-lnl-match.c70
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-mtl-match.c289
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-ptl-match.c148
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-tgl-match.c194
-rw-r--r--sound/soc/intel/keembay/kmb_platform.c2
-rw-r--r--sound/soc/mediatek/common/mtk-soundcard-driver.c4
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-afe-pcm.c19
-rw-r--r--sound/soc/mediatek/mt8365/Makefile2
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-mt6357.c11
-rw-r--r--sound/soc/qcom/common.c6
-rw-r--r--sound/soc/qcom/sc7180.c2
-rw-r--r--sound/soc/qcom/sdm845.c5
-rw-r--r--sound/soc/renesas/rz-ssi.c228
-rw-r--r--sound/soc/rockchip/rockchip_i2s_tdm.c28
-rw-r--r--sound/soc/sdca/Makefile2
-rw-r--r--sound/soc/sdca/sdca_device.c2
-rw-r--r--sound/soc/sdca/sdca_functions.c132
-rw-r--r--sound/soc/sdw_utils/soc_sdw_cs_amp.c46
-rw-r--r--sound/soc/sdw_utils/soc_sdw_utils.c13
-rw-r--r--sound/soc/soc-card.c4
-rw-r--r--sound/soc/soc-core.c58
-rw-r--r--sound/soc/soc-dai.c27
-rw-r--r--sound/soc/soc-dapm.c14
-rw-r--r--sound/soc/soc-pcm.c32
-rw-r--r--sound/soc/soc-topology.c10
-rw-r--r--sound/soc/sof/intel/atom.c16
-rw-r--r--sound/soc/sof/intel/bdw.c16
-rw-r--r--sound/soc/sof/intel/hda-dai.c12
-rw-r--r--sound/soc/sof/intel/hda-pcm.c15
-rw-r--r--sound/soc/sof/intel/hda.c5
-rw-r--r--sound/soc/sof/ipc4-topology.c2
-rw-r--r--sound/soc/sof/sof-audio.h1
-rw-r--r--sound/soc/sof/sof-priv.h8
-rw-r--r--sound/soc/sof/topology.c4
-rw-r--r--sound/soc/sunxi/sun4i-codec.c409
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c24
-rw-r--r--sound/soc/xilinx/xlnx_spdif.c38
-rw-r--r--tools/hv/.gitignore3
-rw-r--r--tools/hv/hv_fcopy_uio_daemon.c12
-rwxr-xr-xtools/hv/hv_get_dns_info.sh4
-rw-r--r--tools/hv/hv_kvp_daemon.c9
-rwxr-xr-xtools/hv/hv_set_ifconfig.sh2
-rw-r--r--tools/include/uapi/linux/stddef.h15
-rw-r--r--tools/net/ynl/lib/ynl.py6
-rw-r--r--tools/objtool/check.c9
-rw-r--r--tools/objtool/noreturns.h1
-rw-r--r--tools/sched_ext/include/scx/common.bpf.h6
-rw-r--r--tools/sched_ext/scx_central.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/socket_helpers.h394
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c51
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h385
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_change_tail.c62
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c40
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_change_tail.c106
-rw-r--r--tools/testing/selftests/bpf/sdt.h2
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c4
-rwxr-xr-xtools/testing/selftests/drivers/net/queues.py51
-rwxr-xr-xtools/testing/selftests/drivers/net/stats.py19
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c57
-rwxr-xr-xtools/testing/selftests/net/forwarding/local_termination.sh1
-rw-r--r--tools/testing/selftests/net/lib/py/ynl.py16
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh6
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c2
-rw-r--r--tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c4
-rw-r--r--tools/testing/selftests/sched_ext/dsp_local_on.bpf.c7
-rw-r--r--tools/testing/selftests/sched_ext/dsp_local_on.c5
-rw-r--r--tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c2
-rw-r--r--tools/testing/selftests/sched_ext/exit.bpf.c4
-rw-r--r--tools/testing/selftests/sched_ext/maximal.bpf.c8
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c2
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c2
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c2
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c2
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c4
-rw-r--r--tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c8
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c177
-rw-r--r--usr/include/Makefile2
-rwxr-xr-xusr/include/headers_check.pl9
674 files changed, 28149 insertions, 4268 deletions
diff --git a/.mailmap b/.mailmap
index 5ff0e5d681e7..f5f97f947020 100644
--- a/.mailmap
+++ b/.mailmap
@@ -435,7 +435,7 @@ Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
-Mathieu Othacehe <m.othacehe@gmail.com> <othacehe@gnu.org>
+Mathieu Othacehe <othacehe@gnu.org> <m.othacehe@gmail.com>
Mat Martineau <martineau@kernel.org> <mathew.j.martineau@linux.intel.com>
Mat Martineau <martineau@kernel.org> <mathewm@codeaurora.org>
Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
@@ -735,6 +735,7 @@ Wolfram Sang <wsa@kernel.org> <w.sang@pengutronix.de>
Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
Yanteng Si <si.yanteng@linux.dev> <siyanteng@loongson.cn>
+Ying Huang <huang.ying.caritas@gmail.com> <ying.huang@intel.com>
Yusuke Goda <goda.yusuke@renesas.com>
Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>
Zhu Yanjun <zyjzyj2000@gmail.com> <yanjunz@nvidia.com>
diff --git a/Documentation/admin-guide/laptops/thinkpad-acpi.rst b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
index 7f674a6cfa8a..4ab0fef7d440 100644
--- a/Documentation/admin-guide/laptops/thinkpad-acpi.rst
+++ b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
@@ -445,8 +445,10 @@ event code Key Notes
0x1008 0x07 FN+F8 IBM: toggle screen expand
Lenovo: configure UltraNav,
or toggle screen expand.
- On newer platforms (2024+)
- replaced by 0x131f (see below)
+ On 2024 platforms replaced by
+ 0x131f (see below) and on newer
+ platforms (2025 +) keycode is
+ replaced by 0x1401 (see below).
0x1009 0x08 FN+F9 -
@@ -506,9 +508,11 @@ event code Key Notes
0x1019 0x18 unknown
-0x131f ... FN+F8 Platform Mode change.
+0x131f ... FN+F8 Platform Mode change (2024 systems).
Implemented in driver.
+0x1401 ... FN+F8 Platform Mode change (2025 + systems).
+ Implemented in driver.
... ... ...
0x1020 0x1F unknown
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 5034915f4e8e..8872203df088 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -436,7 +436,7 @@ AnonHugePmdMapped).
The number of file transparent huge pages mapped to userspace is available
by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``.
To identify what applications are mapping file transparent huge pages, it
-is necessary to read ``/proc/PID/smaps`` and count the FileHugeMapped fields
+is necessary to read ``/proc/PID/smaps`` and count the FilePmdMapped fields
for each mapping.
Note that reading the smaps file is expensive and reading it
diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst
index 210a808b74ec..412423c54f25 100644
--- a/Documentation/admin-guide/pm/amd-pstate.rst
+++ b/Documentation/admin-guide/pm/amd-pstate.rst
@@ -251,9 +251,7 @@ performance supported in `AMD CPPC Performance Capability <perf_cap_>`_).
In some ASICs, the highest CPPC performance is not the one in the ``_CPC``
table, so we need to expose it to sysfs. If boost is not active, but
still supported, this maximum frequency will be larger than the one in
-``cpuinfo``. On systems that support preferred core, the driver will have
-different values for some cores than others and this will reflect the values
-advertised by the platform at bootup.
+``cpuinfo``.
This attribute is read-only.
``amd_pstate_lowest_nonlinear_freq``
diff --git a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
index 9c8c9991f29a..f0c4a7c83568 100644
--- a/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
+++ b/Documentation/devicetree/bindings/crypto/fsl,sec-v4.0.yaml
@@ -114,8 +114,9 @@ patternProperties:
table that specifies the PPID to LIODN mapping. Needed if the PAMU is
used. Value is a 12 bit value where value is a LIODN ID for this JR.
This property is normally set by boot firmware.
- $ref: /schemas/types.yaml#/definitions/uint32
- maximum: 0xfff
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - maximum: 0xfff
'^rtic@[0-9a-f]+$':
type: object
@@ -186,8 +187,9 @@ patternProperties:
Needed if the PAMU is used. Value is a 12 bit value where value
is a LIODN ID for this JR. This property is normally set by boot
firmware.
- $ref: /schemas/types.yaml#/definitions/uint32
- maximum: 0xfff
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ - maximum: 0xfff
fsl,rtic-region:
description:
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
index df20a3c9c744..ec89115c74e4 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
@@ -90,7 +90,7 @@ properties:
adi,dsi-lanes:
description: Number of DSI data lanes connected to the DSI host.
$ref: /schemas/types.yaml#/definitions/uint32
- enum: [ 1, 2, 3, 4 ]
+ enum: [ 2, 3, 4 ]
"#sound-dai-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml b/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
index 058253d6d889..62086366837c 100644
--- a/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
+++ b/Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
@@ -82,7 +82,7 @@ examples:
uimage@100000 {
reg = <0x0100000 0x200000>;
- compress = "lzma";
+ compression = "lzma";
};
};
diff --git a/Documentation/devicetree/bindings/soc/fsl/fsl,qman-portal.yaml b/Documentation/devicetree/bindings/soc/fsl/fsl,qman-portal.yaml
index 17016184143f..e459fec02ba8 100644
--- a/Documentation/devicetree/bindings/soc/fsl/fsl,qman-portal.yaml
+++ b/Documentation/devicetree/bindings/soc/fsl/fsl,qman-portal.yaml
@@ -35,6 +35,7 @@ properties:
fsl,liodn:
$ref: /schemas/types.yaml#/definitions/uint32-array
+ maxItems: 2
description: See pamu.txt. Two LIODN(s). DQRR LIODN (DLIODN) and Frame LIODN
(FLIODN)
@@ -69,6 +70,7 @@ patternProperties:
type: object
properties:
fsl,liodn:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
description: See pamu.txt, PAMU property used for static LIODN assignment
fsl,iommu-parent:
diff --git a/Documentation/devicetree/bindings/sound/adi,ssm2518.yaml b/Documentation/devicetree/bindings/sound/adi,ssm2518.yaml
index f3f32540779c..f1beae84cad1 100644
--- a/Documentation/devicetree/bindings/sound/adi,ssm2518.yaml
+++ b/Documentation/devicetree/bindings/sound/adi,ssm2518.yaml
@@ -36,12 +36,14 @@ unevaluatedProperties: false
examples:
- |
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- codec@34 {
- compatible = "adi,ssm2518";
- reg = <0x34>;
- gpios = <&gpio 5 0>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ codec@34 {
+ compatible = "adi,ssm2518";
+ reg = <0x34>;
+ gpios = <&gpio 5 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
index ebc9097f936a..ccae64ce3071 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
@@ -23,6 +23,7 @@ properties:
- allwinner,sun8i-h3-codec
- allwinner,sun8i-v3s-codec
- allwinner,sun50i-h616-codec
+ - allwinner,suniv-f1c100s-codec
reg:
maxItems: 1
@@ -77,6 +78,7 @@ properties:
- MIC1
- MIC2
- MIC3
+ - MIC
# Microphone Biases from the SoC
- HBIAS
@@ -87,6 +89,8 @@ properties:
- Headset Mic
- Line In
- Line Out
+ - Right FM In
+ - Left FM In
- Mic
- Speaker
@@ -270,6 +274,33 @@ allOf:
- const: rx
- const: tx
+ - if:
+ properties:
+ compatible:
+ enum:
+ - allwinner,suniv-f1c100s-codec
+
+ then:
+ properties:
+ allwinner,audio-routing:
+ items:
+ enum:
+ - HP
+ - HPCOM
+ - LINEIN
+ - LINEOUT
+ - MIC
+ - HBIAS
+ - MBIAS
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Right FM In
+ - Left FM In
+ - Mic
+ - Speaker
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml b/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml
index 3b0b743e49c4..6676406bf2de 100644
--- a/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml
+++ b/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
enum:
- awinic,aw88081
+ - awinic,aw88083
- awinic,aw88261
- awinic,aw88395
- awinic,aw88399
@@ -58,6 +59,7 @@ allOf:
contains:
enum:
- awinic,aw88081
+ - awinic,aw88083
- awinic,aw88261
then:
properties:
diff --git a/Documentation/devicetree/bindings/sound/everest,es71x4.yaml b/Documentation/devicetree/bindings/sound/everest,es71x4.yaml
index fd1b32812228..efe9f3fd3778 100644
--- a/Documentation/devicetree/bindings/sound/everest,es71x4.yaml
+++ b/Documentation/devicetree/bindings/sound/everest,es71x4.yaml
@@ -53,10 +53,10 @@ unevaluatedProperties: false
examples:
- |
- codec {
- compatible = "everest,es7134";
- #sound-dai-cells = <0>;
- VDD-supply = <&vdd_supply>;
- };
+ codec {
+ compatible = "everest,es7134";
+ #sound-dai-cells = <0>;
+ VDD-supply = <&vdd_supply>;
+ };
...
diff --git a/Documentation/devicetree/bindings/sound/everest,es7241.yaml b/Documentation/devicetree/bindings/sound/everest,es7241.yaml
index f179af758730..e5cfb40f1ef2 100644
--- a/Documentation/devicetree/bindings/sound/everest,es7241.yaml
+++ b/Documentation/devicetree/bindings/sound/everest,es7241.yaml
@@ -54,14 +54,15 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- codec {
- compatible = "everest,es7241";
- #sound-dai-cells = <0>;
- reset-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
- VDDP-supply = <&vddp_supply>;
- VDDA-supply = <&vdda_supply>;
- VDDD-supply = <&vddd_supply>;
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ codec {
+ compatible = "everest,es7241";
+ #sound-dai-cells = <0>;
+ reset-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ VDDP-supply = <&vddp_supply>;
+ VDDA-supply = <&vdda_supply>;
+ VDDD-supply = <&vddd_supply>;
+ };
...
diff --git a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
index 0782f3f9947f..c454110f4281 100644
--- a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
@@ -87,20 +87,20 @@ examples:
#include <dt-bindings/clock/imx8mn-clock.h>
easrc: easrc@300c0000 {
- compatible = "fsl,imx8mn-easrc";
- reg = <0x300c0000 0x10000>;
- interrupts = <0x0 122 0x4>;
- clocks = <&clk IMX8MN_CLK_ASRC_ROOT>;
- clock-names = "mem";
- dmas = <&sdma2 16 23 0> , <&sdma2 17 23 0>,
- <&sdma2 18 23 0> , <&sdma2 19 23 0>,
- <&sdma2 20 23 0> , <&sdma2 21 23 0>,
- <&sdma2 22 23 0> , <&sdma2 23 23 0>;
- dma-names = "ctx0_rx", "ctx0_tx",
- "ctx1_rx", "ctx1_tx",
- "ctx2_rx", "ctx2_tx",
- "ctx3_rx", "ctx3_tx";
- firmware-name = "imx/easrc/easrc-imx8mn.bin";
- fsl,asrc-rate = <8000>;
- fsl,asrc-format = <2>;
+ compatible = "fsl,imx8mn-easrc";
+ reg = <0x300c0000 0x10000>;
+ interrupts = <0x0 122 0x4>;
+ clocks = <&clk IMX8MN_CLK_ASRC_ROOT>;
+ clock-names = "mem";
+ dmas = <&sdma2 16 23 0> , <&sdma2 17 23 0>,
+ <&sdma2 18 23 0> , <&sdma2 19 23 0>,
+ <&sdma2 20 23 0> , <&sdma2 21 23 0>,
+ <&sdma2 22 23 0> , <&sdma2 23 23 0>;
+ dma-names = "ctx0_rx", "ctx0_tx",
+ "ctx1_rx", "ctx1_tx",
+ "ctx2_rx", "ctx2_tx",
+ "ctx3_rx", "ctx3_tx";
+ firmware-name = "imx/easrc/easrc-imx8mn.bin";
+ fsl,asrc-rate = <8000>;
+ fsl,asrc-format = <2>;
};
diff --git a/Documentation/devicetree/bindings/sound/fsl,micfil.yaml b/Documentation/devicetree/bindings/sound/fsl,micfil.yaml
index c1e9803fc113..c47b7a097490 100644
--- a/Documentation/devicetree/bindings/sound/fsl,micfil.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,micfil.yaml
@@ -25,6 +25,7 @@ properties:
- fsl,imx8mm-micfil
- fsl,imx8mp-micfil
- fsl,imx93-micfil
+ - fsl,imx943-micfil
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/fsl,mqs.yaml b/Documentation/devicetree/bindings/sound/fsl,mqs.yaml
index 030ccc173130..8c22e8348b14 100644
--- a/Documentation/devicetree/bindings/sound/fsl,mqs.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,mqs.yaml
@@ -23,6 +23,8 @@ properties:
- fsl,imx8qm-mqs
- fsl,imx8qxp-mqs
- fsl,imx93-mqs
+ - fsl,imx943-aonmix-mqs
+ - fsl,imx943-wakeupmix-mqs
- fsl,imx95-aonmix-mqs
- fsl,imx95-netcmix-mqs
diff --git a/Documentation/devicetree/bindings/sound/fsl,xcvr.yaml b/Documentation/devicetree/bindings/sound/fsl,xcvr.yaml
index 5e2801014221..f68d0e0ecfe5 100644
--- a/Documentation/devicetree/bindings/sound/fsl,xcvr.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,xcvr.yaml
@@ -140,21 +140,21 @@ examples:
#include <dt-bindings/reset/imx8mp-reset.h>
xcvr: xcvr@30cc0000 {
- compatible = "fsl,imx8mp-xcvr";
- reg = <0x30cc0000 0x800>,
- <0x30cc0800 0x400>,
- <0x30cc0c00 0x080>,
- <0x30cc0e00 0x080>;
- reg-names = "ram", "regs", "rxfifo", "txfifo";
- interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&audiomix_clk IMX8MP_CLK_AUDIOMIX_EARC_IPG>,
- <&audiomix_clk IMX8MP_CLK_AUDIOMIX_EARC_PHY>,
- <&audiomix_clk IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT>,
- <&audiomix_clk IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT>;
- clock-names = "ipg", "phy", "spba", "pll_ipg";
- dmas = <&sdma2 30 2 0>, <&sdma2 31 2 0>;
- dma-names = "rx", "tx";
- resets = <&audiomix_reset 0>;
+ compatible = "fsl,imx8mp-xcvr";
+ reg = <0x30cc0000 0x800>,
+ <0x30cc0800 0x400>,
+ <0x30cc0c00 0x080>,
+ <0x30cc0e00 0x080>;
+ reg-names = "ram", "regs", "rxfifo", "txfifo";
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&audiomix_clk IMX8MP_CLK_AUDIOMIX_EARC_IPG>,
+ <&audiomix_clk IMX8MP_CLK_AUDIOMIX_EARC_PHY>,
+ <&audiomix_clk IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT>,
+ <&audiomix_clk IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT>;
+ clock-names = "ipg", "phy", "spba", "pll_ipg";
+ dmas = <&sdma2 30 2 0>, <&sdma2 31 2 0>;
+ dma-names = "rx", "tx";
+ resets = <&audiomix_reset 0>;
};
diff --git a/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml b/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml
index 76b6f2cf25df..dca617860938 100644
--- a/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/intel,keembay-i2s.yaml
@@ -72,19 +72,19 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- #include <dt-bindings/interrupt-controller/irq.h>
- #define KEEM_BAY_PSS_AUX_I2S3
- #define KEEM_BAY_PSS_I2S3
- i2s3: i2s@20140000 {
- compatible = "intel,keembay-i2s";
- #sound-dai-cells = <0>;
- reg = <0x20140000 0x200>, /* I2S registers */
- <0x202a00a4 0x4>; /* I2S gen configuration */
- reg-names = "i2s-regs", "i2s_gen_cfg";
- interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "osc", "apb_clk";
- clocks = <&scmi_clk KEEM_BAY_PSS_AUX_I2S3>, <&scmi_clk KEEM_BAY_PSS_I2S3>;
- dmas = <&axi_dma0 29>, <&axi_dma0 33>;
- dma-names = "tx", "rx";
- };
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #define KEEM_BAY_PSS_AUX_I2S3
+ #define KEEM_BAY_PSS_I2S3
+ i2s@20140000 {
+ compatible = "intel,keembay-i2s";
+ #sound-dai-cells = <0>;
+ reg = <0x20140000 0x200>, /* I2S registers */
+ <0x202a00a4 0x4>; /* I2S gen configuration */
+ reg-names = "i2s-regs", "i2s_gen_cfg";
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "osc", "apb_clk";
+ clocks = <&scmi_clk KEEM_BAY_PSS_AUX_I2S3>, <&scmi_clk KEEM_BAY_PSS_I2S3>;
+ dmas = <&axi_dma0 29>, <&axi_dma0 33>;
+ dma-names = "tx", "rx";
+ };
diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
index ba482747f0e6..362e729b51b4 100644
--- a/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
+++ b/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
@@ -14,11 +14,15 @@ allOf:
properties:
compatible:
- enum:
- - mediatek,mt8188-es8326
- - mediatek,mt8188-mt6359-evb
- - mediatek,mt8188-nau8825
- - mediatek,mt8188-rt5682s
+ oneOf:
+ - enum:
+ - mediatek,mt8188-es8326
+ - mediatek,mt8188-mt6359-evb
+ - mediatek,mt8188-nau8825
+ - mediatek,mt8188-rt5682s
+ - items:
+ - const: mediatek,mt8390-mt6359-evk
+ - const: mediatek,mt8188-mt6359-evb
audio-routing:
description:
@@ -56,6 +60,8 @@ patternProperties:
- ETDM2_OUT_BE
- ETDM3_OUT_BE
- PCM1_BE
+ - DL_SRC_BE
+ - UL_SRC_BE
codec:
description: Holds subnode which indicates codec dai.
diff --git a/Documentation/devicetree/bindings/sound/neofidelity,ntp8918.yaml b/Documentation/devicetree/bindings/sound/neofidelity,ntp8918.yaml
index 952768b35902..6946177e391a 100644
--- a/Documentation/devicetree/bindings/sound/neofidelity,ntp8918.yaml
+++ b/Documentation/devicetree/bindings/sound/neofidelity,ntp8918.yaml
@@ -55,16 +55,18 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- audio-codec@2a {
- compatible = "neofidelity,ntp8918";
- #sound-dai-cells = <0>;
- reg = <0x2a>;
- clocks = <&clkc 150>, <&clkc 151>, <&clkc 152>;
- clock-names = "wck", "scl", "bck";
- reset-gpios = <&gpio 5 GPIO_ACTIVE_LOW>;
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ audio-codec@2a {
+ compatible = "neofidelity,ntp8918";
+ #sound-dai-cells = <0>;
+ reg = <0x2a>;
+ clocks = <&clkc 150>, <&clkc 151>, <&clkc 152>;
+ clock-names = "wck", "scl", "bck";
+ reset-gpios = <&gpio 5 GPIO_ACTIVE_LOW>;
+ };
};
- };
diff --git a/Documentation/devicetree/bindings/sound/realtek,rt5682.yaml b/Documentation/devicetree/bindings/sound/realtek,rt5682.yaml
new file mode 100644
index 000000000000..39333ea05646
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/realtek,rt5682.yaml
@@ -0,0 +1,156 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/realtek,rt5682.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek rt5682 and rt5682i codecs
+
+maintainers:
+ - Bard Liao <bardliao@realtek.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - realtek,rt5682
+ - realtek,rt5682i
+
+ reg:
+ maxItems: 1
+ description: I2C address of the device.
+
+ interrupts:
+ maxItems: 1
+ description: The CODEC's interrupt output.
+
+ realtek,dmic1-data-pin:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # dmic1 data is not used
+ - 1 # using GPIO2 pin as dmic1 data pin
+ - 2 # using GPIO5 pin as dmic1 data pin
+ description:
+ Specify which GPIO pin be used as DMIC1 data pin.
+
+ realtek,dmic1-clk-pin:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # using GPIO1 pin as dmic1 clock pin
+ - 1 # using GPIO3 pin as dmic1 clock pin
+ description:
+ Specify which GPIO pin be used as DMIC1 clk pin.
+
+ realtek,jd-src:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # No JD is used
+ - 1 # using JD1 as JD source
+ description:
+ Specify which JD source be used.
+
+ realtek,ldo1-en-gpios:
+ description:
+ The GPIO that controls the CODEC's LDO1_EN pin.
+
+ realtek,btndet-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ The debounce delay for push button.
+ The delay time is realtek,btndet-delay value multiple of 8.192 ms.
+ If absent, the default is 16.
+
+ realtek,dmic-clk-rate-hz:
+ description:
+ Set the clock rate (hz) for the requirement of the particular DMIC.
+
+ realtek,dmic-delay-ms:
+ description:
+ Set the delay time (ms) for the requirement of the particular DMIC.
+
+ realtek,dmic-clk-driving-high:
+ type: boolean
+ description:
+ Set the high driving of the DMIC clock out.
+
+ clocks:
+ items:
+ - description: phandle and clock specifier for codec MCLK.
+
+ clock-names:
+ items:
+ - const: mclk
+
+ "#clock-cells":
+ const: 1
+
+ clock-output-names:
+ minItems: 2
+ maxItems: 2
+ description: Name given for DAI word clock and bit clock outputs.
+
+ "#sound-dai-cells":
+ const: 1
+
+ AVDD-supply:
+ description: Regulator supplying analog power through the AVDD pin.
+
+ MICVDD-supply:
+ description: Regulator supplying power for the microphone bias through
+ the MICVDD pin.
+
+ VBAT-supply:
+ description: Regulator supplying battery power through the VBAT pin.
+
+ DBVDD-supply:
+ description: Regulator supplying I/O power through the DBVDD pin.
+
+ LDO1-IN-supply:
+ description: Regulator supplying power to the digital core and charge
+ pump through the LDO1_IN pin.
+
+required:
+ - compatible
+ - reg
+ - AVDD-supply
+ - VBAT-supply
+ - MICVDD-supply
+ - DBVDD-supply
+ - LDO1-IN-supply
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@1a {
+ compatible = "realtek,rt5682";
+ reg = <0x1a>;
+ interrupts = <6 IRQ_TYPE_LEVEL_HIGH>;
+ realtek,ldo1-en-gpios =
+ <&gpio 2 GPIO_ACTIVE_HIGH>;
+ realtek,dmic1-data-pin = <1>;
+ realtek,dmic1-clk-pin = <1>;
+ realtek,jd-src = <1>;
+
+ #clock-cells = <1>;
+ clock-output-names = "rt5682-dai-wclk", "rt5682-dai-bclk";
+
+ clocks = <&osc>;
+ clock-names = "mclk";
+
+ AVDD-supply = <&avdd_reg>;
+ VBAT-supply = <&vbat_reg>;
+ MICVDD-supply = <&micvdd_reg>;
+ DBVDD-supply = <&dbvdd_reg>;
+ LDO1-IN-supply = <&ldo1_in_reg>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
index 6d0d1514cd42..e8a2acb92646 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.yaml
@@ -112,12 +112,6 @@ properties:
description: List of necessary clock names.
# details are defined below
- post-init-providers:
- description: At least if rsnd is using DPCM connection on Audio-Graph-Card2,
- fw_devlink might doesn't have enough information to break the cycle. rsnd
- driver will not be probed in such case. Same problem might occur with
- Multi-CPU/Codec or Codec2Codec.
-
# ports is below
port:
$ref: audio-graph-port.yaml#/definitions/port-base
diff --git a/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml b/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
index f4610eaed1e1..e4cdbf2202b9 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,rz-ssi.yaml
@@ -19,6 +19,7 @@ properties:
- renesas,r9a07g043-ssi # RZ/G2UL and RZ/Five
- renesas,r9a07g044-ssi # RZ/G2{L,LC}
- renesas,r9a07g054-ssi # RZ/V2L
+ - renesas,r9a08g045-ssi # RZ/G3S
- const: renesas,rz-ssi
reg:
@@ -57,24 +58,6 @@ properties:
dmas:
minItems: 1
maxItems: 2
- description:
- The first cell represents a phandle to dmac.
- The second cell specifies the encoded MID/RID values of the SSI port
- connected to the DMA client and the slave channel configuration
- parameters.
- bits[0:9] - Specifies MID/RID value of a SSI channel as below
- MID/RID value of SSI rx0 = 0x256
- MID/RID value of SSI tx0 = 0x255
- MID/RID value of SSI rx1 = 0x25a
- MID/RID value of SSI tx1 = 0x259
- MID/RID value of SSI rt2 = 0x25f
- MID/RID value of SSI rx3 = 0x262
- MID/RID value of SSI tx3 = 0x261
- bit[10] - HIEN = 1, Detects a request in response to the rising edge
- of the signal
- bit[11] - LVL = 0, Detects based on the edge
- bits[12:14] - AM = 2, Bus cycle mode
- bit[15] - TM = 0, Single transfer mode
dma-names:
oneOf:
diff --git a/Documentation/devicetree/bindings/sound/rt5682.txt b/Documentation/devicetree/bindings/sound/rt5682.txt
deleted file mode 100644
index 5e1d08de18a5..000000000000
--- a/Documentation/devicetree/bindings/sound/rt5682.txt
+++ /dev/null
@@ -1,98 +0,0 @@
-RT5682 audio CODEC
-
-This device supports I2C only.
-
-Required properties:
-
-- compatible : "realtek,rt5682" or "realtek,rt5682i"
-
-- reg : The I2C address of the device.
-
-- AVDD-supply: phandle to the regulator supplying analog power through the
- AVDD pin
-
-- MICVDD-supply: phandle to the regulator supplying power for the microphone
- bias through the MICVDD pin. Either MICVDD or VBAT should be present.
-
-- VBAT-supply: phandle to the regulator supplying battery power through the
- VBAT pin. Either MICVDD or VBAT should be present.
-
-- DBVDD-supply: phandle to the regulator supplying I/O power through the DBVDD
- pin.
-
-- LDO1-IN-supply: phandle to the regulator supplying power to the digital core
- and charge pump through the LDO1_IN pin.
-
-Optional properties:
-
-- interrupts : The CODEC's interrupt output.
-
-- realtek,dmic1-data-pin
- 0: dmic1 is not used
- 1: using GPIO2 pin as dmic1 data pin
- 2: using GPIO5 pin as dmic1 data pin
-
-- realtek,dmic1-clk-pin
- 0: using GPIO1 pin as dmic1 clock pin
- 1: using GPIO3 pin as dmic1 clock pin
-
-- realtek,jd-src
- 0: No JD is used
- 1: using JD1 as JD source
-
-- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
-
-- realtek,btndet-delay
- The debounce delay for push button.
- The delay time is realtek,btndet-delay value multiple of 8.192 ms.
- If absent, the default is 16.
-
-- #clock-cells : Should be set to '<1>', wclk and bclk sources provided.
-- clock-output-names : Name given for DAI clocks output.
-
-- clocks : phandle and clock specifier for codec MCLK.
-- clock-names : Clock name string for 'clocks' attribute, should be "mclk".
-
-- realtek,dmic-clk-rate-hz : Set the clock rate (hz) for the requirement of
- the particular DMIC.
-
-- realtek,dmic-delay-ms : Set the delay time (ms) for the requirement of
- the particular DMIC.
-
-- realtek,dmic-clk-driving-high : Set the high driving of the DMIC clock out.
-
-- #sound-dai-cells: Should be set to '<1>'.
-
-Pins on the device (for linking into audio routes) for RT5682:
-
- * DMIC L1
- * DMIC R1
- * IN1P
- * HPOL
- * HPOR
-
-Example:
-
-rt5682 {
- compatible = "realtek,rt5682i";
- reg = <0x1a>;
- interrupt-parent = <&gpio>;
- interrupts = <TEGRA_GPIO(U, 6) IRQ_TYPE_LEVEL_HIGH>;
- realtek,ldo1-en-gpios =
- <&gpio TEGRA_GPIO(R, 2) GPIO_ACTIVE_HIGH>;
- realtek,dmic1-data-pin = <1>;
- realtek,dmic1-clk-pin = <1>;
- realtek,jd-src = <1>;
- realtek,btndet-delay = <16>;
-
- #clock-cells = <1>;
- clock-output-names = "rt5682-dai-wclk", "rt5682-dai-bclk";
-
- clocks = <&osc>;
- clock-names = "mclk";
-
- AVDD-supply = <&avdd_reg>;
- MICVDD-supply = <&micvdd_reg>;
- DBVDD-supply = <&dbvdd_reg>;
- LDO1-IN-supply = <&ldo1_in_reg>;
-};
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml b/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml
index dd5b08e3d7a1..d89b4255b51c 100644
--- a/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,pcm6240.yaml
@@ -159,19 +159,21 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- /* example for two devices with interrupt support */
- #address-cells = <1>;
- #size-cells = <0>;
- pcm6240: audio-codec@48 {
- compatible = "ti,pcm6240";
- reg = <0x48>, /* primary-device */
- <0x4b>; /* secondary-device */
- #sound-dai-cells = <0>;
- reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
- interrupt-parent = <&gpio1>;
- interrupts = <15>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ /* example for two devices with interrupt support */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ audio-codec@48 {
+ compatible = "ti,pcm6240";
+ reg = <0x48>, /* primary-device */
+ <0x4b>; /* secondary-device */
+ #sound-dai-cells = <0>;
+ reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <15>;
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/sound/ti,tas2562.yaml b/Documentation/devicetree/bindings/sound/ti,tas2562.yaml
index 8bc3b0c7531e..3763ca16b91f 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas2562.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas2562.yaml
@@ -65,17 +65,19 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- codec: codec@4c {
- compatible = "ti,tas2562";
- reg = <0x4c>;
- #sound-dai-cells = <0>;
- interrupt-parent = <&gpio1>;
- interrupts = <14>;
- shutdown-gpios = <&gpio1 15 0>;
- ti,imon-slot-no = <0>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@4c {
+ compatible = "ti,tas2562";
+ reg = <0x4c>;
+ #sound-dai-cells = <0>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <14>;
+ shutdown-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ ti,imon-slot-no = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/ti,tas2770.yaml b/Documentation/devicetree/bindings/sound/ti,tas2770.yaml
index 362c2e6154f0..5e7aea43aced 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas2770.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas2770.yaml
@@ -69,19 +69,21 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- codec: codec@41 {
- compatible = "ti,tas2770";
- reg = <0x41>;
- #sound-dai-cells = <0>;
- interrupt-parent = <&gpio1>;
- interrupts = <14>;
- reset-gpio = <&gpio1 15 0>;
- shutdown-gpios = <&gpio1 14 0>;
- ti,imon-slot-no = <0>;
- ti,vmon-slot-no = <2>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@41 {
+ compatible = "ti,tas2770";
+ reg = <0x41>;
+ #sound-dai-cells = <0>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <14>;
+ reset-gpio = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;
+ ti,imon-slot-no = <0>;
+ ti,vmon-slot-no = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/ti,tas2781.yaml b/Documentation/devicetree/bindings/sound/ti,tas2781.yaml
index 976238689249..5ea1cdc593b5 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas2781.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas2781.yaml
@@ -101,22 +101,24 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- /* example with quad tas2781s, such as tablet or pad device */
- #address-cells = <1>;
- #size-cells = <0>;
- quad_tas2781: tas2781@38 {
- compatible = "ti,tas2781";
- reg = <0x38>, /* Audio slot 0 */
- <0x3a>, /* Audio slot 1 */
- <0x39>, /* Audio slot 2 */
- <0x3b>; /* Audio slot 3 */
-
- #sound-dai-cells = <0>;
- reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
- interrupt-parent = <&gpio1>;
- interrupts = <15>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ /* example with quad tas2781s, such as tablet or pad device */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ audio-codec@38 {
+ compatible = "ti,tas2781";
+ reg = <0x38>, /* Audio slot 0 */
+ <0x3a>, /* Audio slot 1 */
+ <0x39>, /* Audio slot 2 */
+ <0x3b>; /* Audio slot 3 */
+
+ #sound-dai-cells = <0>;
+ reset-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <15>;
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml b/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml
index 530bc3937847..5447482179c1 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml
@@ -62,21 +62,23 @@ unevaluatedProperties: false
examples:
- |
- #include <dt-bindings/gpio/gpio.h>
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
- codec: codec@38 {
- compatible = "ti,tas2764";
- reg = <0x38>;
- #sound-dai-cells = <0>;
- interrupt-parent = <&gpio1>;
- interrupts = <14>;
- reset-gpios = <&gpio1 15 0>;
- shutdown-gpios = <&gpio1 15 0>;
- ti,imon-slot-no = <0>;
- ti,vmon-slot-no = <2>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@38 {
+ compatible = "ti,tas2764";
+ reg = <0x38>;
+ #sound-dai-cells = <0>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <14>;
+ reset-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ ti,imon-slot-no = <0>;
+ ti,vmon-slot-no = <2>;
+ };
+ };
...
diff --git a/Documentation/devicetree/bindings/sound/ti,tas57xx.yaml b/Documentation/devicetree/bindings/sound/ti,tas57xx.yaml
index 2f917238db95..74f7d02b424b 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas57xx.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas57xx.yaml
@@ -112,22 +112,24 @@ unevaluatedProperties: false
examples:
- |
- i2c {
- #address-cells = <1>;
- #size-cells = <0>;
-
- codec@2a {
- compatible = "ti,tas5717";
- reg = <0x2a>;
- #sound-dai-cells = <0>;
- reset-gpios = <&gpio1 15 0>;
- pdn-gpios = <&gpio1 15 0>;
- AVDD-supply = <&avdd_supply>;
- DVDD-supply = <&dvdd_supply>;
- HPVDD-supply = <&hpvdd_supply>;
- PVDD_AB-supply = <&pvdd_ab_supply>;
- PVDD_CD-supply = <&pvdd_cd_supply>;
- };
- };
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec@2a {
+ compatible = "ti,tas5717";
+ reg = <0x2a>;
+ #sound-dai-cells = <0>;
+ reset-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ pdn-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ AVDD-supply = <&avdd_supply>;
+ DVDD-supply = <&dvdd_supply>;
+ HPVDD-supply = <&hpvdd_supply>;
+ PVDD_AB-supply = <&pvdd_ab_supply>;
+ PVDD_CD-supply = <&pvdd_cd_supply>;
+ };
+ };
...
diff --git a/Documentation/mm/process_addrs.rst b/Documentation/mm/process_addrs.rst
index e8618fbc62c9..1d416658d7f5 100644
--- a/Documentation/mm/process_addrs.rst
+++ b/Documentation/mm/process_addrs.rst
@@ -3,3 +3,853 @@
=================
Process Addresses
=================
+
+.. toctree::
+ :maxdepth: 3
+
+
+Userland memory ranges are tracked by the kernel via Virtual Memory Areas or
+'VMA's of type :c:struct:`!struct vm_area_struct`.
+
+Each VMA describes a virtually contiguous memory range with identical
+attributes, each described by a :c:struct:`!struct vm_area_struct`
+object. Userland access outside of VMAs is invalid except in the case where an
+adjacent stack VMA could be extended to contain the accessed address.
+
+All VMAs are contained within one and only one virtual address space, described
+by a :c:struct:`!struct mm_struct` object which is referenced by all tasks (that is,
+threads) which share the virtual address space. We refer to this as the
+:c:struct:`!mm`.
+
+Each mm object contains a maple tree data structure which describes all VMAs
+within the virtual address space.
+
+.. note:: An exception to this is the 'gate' VMA which is provided by
+ architectures which use :c:struct:`!vsyscall` and is a global static
+ object which does not belong to any specific mm.
+
+-------
+Locking
+-------
+
+The kernel is designed to be highly scalable against concurrent read operations
+on VMA **metadata**, so a complicated set of locks is required to ensure that
+memory corruption does not occur.
+
+.. note:: Locking VMAs for their metadata does not have any impact on the memory
+ they describe nor the page tables that map them.
+
+Terminology
+-----------
+
+* **mmap locks** - Each MM has a read/write semaphore :c:member:`!mmap_lock`
+ which locks at a process address space granularity which can be acquired via
+ :c:func:`!mmap_read_lock`, :c:func:`!mmap_write_lock` and variants.
+* **VMA locks** - The VMA lock is at VMA granularity (of course) which behaves
+ as a read/write semaphore in practice. A VMA read lock is obtained via
+ :c:func:`!lock_vma_under_rcu` (and unlocked via :c:func:`!vma_end_read`) and a
+ write lock via :c:func:`!vma_start_write` (all VMA write locks are unlocked
+ automatically when the mmap write lock is released). To take a VMA write lock
+ you **must** have already acquired an :c:func:`!mmap_write_lock`.
+* **rmap locks** - When trying to access VMAs through the reverse mapping via a
+ :c:struct:`!struct address_space` or :c:struct:`!struct anon_vma` object
+  (reachable from a folio via :c:member:`!folio->mapping`), VMAs must be stabilised via
+ :c:func:`!anon_vma_[try]lock_read` or :c:func:`!anon_vma_[try]lock_write` for
+ anonymous memory and :c:func:`!i_mmap_[try]lock_read` or
+ :c:func:`!i_mmap_[try]lock_write` for file-backed memory. We refer to these
+ locks as the reverse mapping locks, or 'rmap locks' for brevity.
+
+We discuss page table locks separately in the dedicated section below.
+
+The first thing **any** of these locks achieves is to **stabilise** the VMA
+within the MM tree. That is, guaranteeing that the VMA object will not be
+deleted from under you nor modified (except for some specific fields
+described below).
+
+Stabilising a VMA also keeps the address space described by it around.
+
+Lock usage
+----------
+
+If you want to **read** VMA metadata fields or just keep the VMA stable, you
+must do one of the following:
+
+* Obtain an mmap read lock at the MM granularity via :c:func:`!mmap_read_lock` (or a
+ suitable variant), unlocking it with a matching :c:func:`!mmap_read_unlock` when
+ you're done with the VMA, *or*
+* Try to obtain a VMA read lock via :c:func:`!lock_vma_under_rcu`. This tries to
+ acquire the lock atomically so might fail, in which case fall-back logic is
+ required to instead obtain an mmap read lock if this returns :c:macro:`!NULL`,
+ *or*
+* Acquire an rmap lock before traversing the locked interval tree (whether
+ anonymous or file-backed) to obtain the required VMA.
+
+If you want to **write** VMA metadata fields, then things vary depending on the
+field (we explore each VMA field in detail below). For the majority you must:
+
+* Obtain an mmap write lock at the MM granularity via :c:func:`!mmap_write_lock` (or a
+ suitable variant), unlocking it with a matching :c:func:`!mmap_write_unlock` when
+ you're done with the VMA, *and*
+* Obtain a VMA write lock via :c:func:`!vma_start_write` for each VMA you wish to
+ modify, which will be released automatically when :c:func:`!mmap_write_unlock` is
+ called.
+* If you want to be able to write to **any** field, you must also hide the VMA
+ from the reverse mapping by obtaining an **rmap write lock**.
+
+VMA locks are special in that you must obtain an mmap **write** lock **first**
+in order to obtain a VMA **write** lock. A VMA **read** lock however can be
+obtained without any other lock (:c:func:`!lock_vma_under_rcu` will acquire then
+release an RCU lock to look up the VMA for you).
+
+This constrains the impact of writers on readers, as a writer can interact with
+one VMA while a reader interacts with another simultaneously.
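+
+By way of illustration, a page-fault-style reader might first try the per-VMA
+lock and fall back to the mmap lock; a minimal sketch (the surrounding function
+and the work done under the lock are hypothetical, error handling elided):
+
+.. code-block:: c
+
+  struct vm_area_struct *vma;
+
+  /* Optimistic per-VMA read lock; may fail if contended. */
+  vma = lock_vma_under_rcu(mm, addr);
+  if (vma) {
+          /* ... read VMA metadata, handle the fault, etc. ... */
+          vma_end_read(vma);
+  } else {
+          /* Fall back to the heavier mmap read lock. */
+          mmap_read_lock(mm);
+          vma = find_vma(mm, addr);
+          if (vma) {
+                  /* ... same work, VMA stabilised by the mmap lock ... */
+          }
+          mmap_read_unlock(mm);
+  }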
+
+.. note:: The primary users of VMA read locks are page fault handlers, which
+   means that without a VMA write lock, page faults will run concurrently with
+ whatever you are doing.
+
+Examining all valid lock states:
+
+.. table::
+
+ ========= ======== ========= ======= ===== =========== ==========
+ mmap lock VMA lock rmap lock Stable? Read? Write most? Write all?
+ ========= ======== ========= ======= ===== =========== ==========
+ \- \- \- N N N N
+ \- R \- Y Y N N
+ \- \- R/W Y Y N N
+ R/W \-/R \-/R/W Y Y N N
+ W W \-/R Y Y Y N
+ W W W Y Y Y Y
+ ========= ======== ========= ======= ===== =========== ==========
+
+.. warning:: While it's possible to obtain a VMA lock while holding an mmap read lock,
+ attempting to do the reverse is invalid as it can result in deadlock - if
+ another task already holds an mmap write lock and attempts to acquire a VMA
+ write lock that will deadlock on the VMA read lock.
+
+All of these locks behave as read/write semaphores in practice, so you can
+obtain either a read or a write lock for each of these.
+
+.. note:: Generally speaking, a read/write semaphore is a class of lock which
+ permits concurrent readers. However a write lock can only be obtained
+ once all readers have left the critical region (and pending readers
+ made to wait).
+
+ This renders read locks on a read/write semaphore concurrent with other
+ readers and write locks exclusive against all others holding the semaphore.
+
+VMA fields
+^^^^^^^^^^
+
+We can subdivide :c:struct:`!struct vm_area_struct` fields by their purpose, which makes it
+easier to explore their locking characteristics:
+
+.. note:: We exclude VMA lock-specific fields here to avoid confusion, as these
+ are in effect an internal implementation detail.
+
+.. table:: Virtual layout fields
+
+ ===================== ======================================== ===========
+ Field Description Write lock
+ ===================== ======================================== ===========
+ :c:member:`!vm_start` Inclusive start virtual address of range mmap write,
+ VMA describes. VMA write,
+ rmap write.
+ :c:member:`!vm_end` Exclusive end virtual address of range mmap write,
+ VMA describes. VMA write,
+ rmap write.
+ :c:member:`!vm_pgoff` Describes the page offset into the file, mmap write,
+ the original page offset within the VMA write,
+ virtual address space (prior to any rmap write.
+ :c:func:`!mremap`), or PFN if a PFN map
+ and the architecture does not support
+ :c:macro:`!CONFIG_ARCH_HAS_PTE_SPECIAL`.
+ ===================== ======================================== ===========
+
+These fields describe the size, start and end of the VMA, and as such cannot be
+modified without first being hidden from the reverse mapping since these fields
+are used to locate VMAs within the reverse mapping interval trees.
+
+.. table:: Core fields
+
+ ============================ ======================================== =========================
+ Field Description Write lock
+ ============================ ======================================== =========================
+ :c:member:`!vm_mm` Containing mm_struct. None - written once on
+ initial map.
+ :c:member:`!vm_page_prot` Architecture-specific page table mmap write, VMA write.
+ protection bits determined from VMA
+ flags.
+ :c:member:`!vm_flags` Read-only access to VMA flags describing N/A
+ attributes of the VMA, in union with
+ private writable
+ :c:member:`!__vm_flags`.
+ :c:member:`!__vm_flags` Private, writable access to VMA flags mmap write, VMA write.
+ field, updated by
+ :c:func:`!vm_flags_*` functions.
+ :c:member:`!vm_file` If the VMA is file-backed, points to a None - written once on
+ struct file object describing the initial map.
+ underlying file, if anonymous then
+ :c:macro:`!NULL`.
+ :c:member:`!vm_ops` If the VMA is file-backed, then either None - Written once on
+ the driver or file-system provides a initial map by
+ :c:struct:`!struct vm_operations_struct` :c:func:`!f_ops->mmap()`.
+ object describing callbacks to be
+ invoked on VMA lifetime events.
+ :c:member:`!vm_private_data` A :c:member:`!void *` field for Handled by driver.
+ driver-specific metadata.
+ ============================ ======================================== =========================
+
+These are the core fields which describe the MM the VMA belongs to and its attributes.
+
+.. table:: Config-specific fields
+
+ ================================= ===================== ======================================== ===============
+ Field Configuration option Description Write lock
+ ================================= ===================== ======================================== ===============
+ :c:member:`!anon_name` CONFIG_ANON_VMA_NAME A field for storing a mmap write,
+ :c:struct:`!struct anon_vma_name` VMA write.
+ object providing a name for anonymous
+ mappings, or :c:macro:`!NULL` if none
+ is set or the VMA is file-backed. The
+ underlying object is reference counted
+ and can be shared across multiple VMAs
+ for scalability.
+ :c:member:`!swap_readahead_info` CONFIG_SWAP Metadata used by the swap mechanism mmap read,
+ to perform readahead. This field is swap-specific
+ accessed atomically. lock.
+ :c:member:`!vm_policy` CONFIG_NUMA :c:type:`!mempolicy` object which mmap write,
+ describes the NUMA behaviour of the VMA write.
+ VMA. The underlying object is reference
+ counted.
+ :c:member:`!numab_state` CONFIG_NUMA_BALANCING :c:type:`!vma_numab_state` object which mmap read,
+ describes the current state of numab-specific
+ NUMA balancing in relation to this VMA. lock.
+ Updated under mmap read lock by
+ :c:func:`!task_numa_work`.
+ :c:member:`!vm_userfaultfd_ctx` CONFIG_USERFAULTFD Userfaultfd context wrapper object of mmap write,
+ type :c:type:`!vm_userfaultfd_ctx`, VMA write.
+ either of zero size if userfaultfd is
+ disabled, or containing a pointer
+ to an underlying
+ :c:type:`!userfaultfd_ctx` object which
+ describes userfaultfd metadata.
+ ================================= ===================== ======================================== ===============
+
+These fields are present or not depending on whether the relevant kernel
+configuration option is set.
+
+.. table:: Reverse mapping fields
+
+ =================================== ========================================= ============================
+ Field Description Write lock
+ =================================== ========================================= ============================
+ :c:member:`!shared.rb` A red/black tree node used, if the mmap write, VMA write,
+ mapping is file-backed, to place the VMA i_mmap write.
+ in the
+ :c:member:`!struct address_space->i_mmap`
+ red/black interval tree.
+ :c:member:`!shared.rb_subtree_last` Metadata used for management of the mmap write, VMA write,
+ interval tree if the VMA is file-backed. i_mmap write.
+ :c:member:`!anon_vma_chain` List of pointers to both forked/CoW’d mmap read, anon_vma write.
+ :c:type:`!anon_vma` objects and
+ :c:member:`!vma->anon_vma` if it is
+ non-:c:macro:`!NULL`.
+ :c:member:`!anon_vma` :c:type:`!anon_vma` object used by When :c:macro:`NULL` and
+ anonymous folios mapped exclusively to setting non-:c:macro:`NULL`:
+ this VMA. Initially set by mmap read, page_table_lock.
+ :c:func:`!anon_vma_prepare` serialised
+ by the :c:macro:`!page_table_lock`. This When non-:c:macro:`NULL` and
+ is set as soon as any page is faulted in. setting :c:macro:`NULL`:
+ mmap write, VMA write,
+ anon_vma write.
+ =================================== ========================================= ============================
+
+These fields are used to both place the VMA within the reverse mapping, and for
+anonymous mappings, to be able to access both related :c:struct:`!struct anon_vma` objects
+and the :c:struct:`!struct anon_vma` in which folios mapped exclusively to this VMA should
+reside.
+
+.. note:: If a file-backed mapping is mapped with :c:macro:`!MAP_PRIVATE` set
+ then it can be in both the :c:type:`!anon_vma` and :c:type:`!i_mmap`
+ trees at the same time, so all of these fields might be utilised at
+ once.
+
+Page tables
+-----------
+
+We won't speak exhaustively on the subject, but broadly speaking, page tables
+map virtual addresses to physical ones through a series of page tables, each of
+which contains entries with physical addresses for the next page table level
+(along with flags), and at the leaf level the physical addresses of the
+underlying physical data pages or a special entry such as a swap entry,
+migration entry or other special marker. Offsets into these pages are provided
+by the virtual address itself.
+
+In Linux these are divided into five levels - PGD, P4D, PUD, PMD and PTE. Huge
+pages might eliminate one or two of these levels, but when this is the case we
+typically refer to the leaf level as the PTE level regardless.
+
+.. note:: In instances where the architecture supports fewer page tables than
+          five, the kernel cleverly 'folds' page table levels, that is, stubbing
+ out functions related to the skipped levels. This allows us to
+ conceptually act as if there were always five levels, even if the
+ compiler might, in practice, eliminate any code relating to missing
+ ones.
+
+There are four key operations typically performed on page tables:
+
+1. **Traversing** page tables - Simply reading page tables in order to traverse
+ them. This only requires that the VMA is kept stable, so a lock which
+ establishes this suffices for traversal (there are also lockless variants
+ which eliminate even this requirement, such as :c:func:`!gup_fast`).
+2. **Installing** page table mappings - Whether creating a new mapping or
+ modifying an existing one in such a way as to change its identity. This
+ requires that the VMA is kept stable via an mmap or VMA lock (explicitly not
+ rmap locks).
+3. **Zapping/unmapping** page table entries - This is what the kernel calls
+ clearing page table mappings at the leaf level only, whilst leaving all page
+ tables in place. This is a very common operation in the kernel performed on
+ file truncation, the :c:macro:`!MADV_DONTNEED` operation via
+ :c:func:`!madvise`, and others. This is performed by a number of functions
+ including :c:func:`!unmap_mapping_range` and :c:func:`!unmap_mapping_pages`.
+ The VMA need only be kept stable for this operation.
+4. **Freeing** page tables - When finally the kernel removes page tables from a
+ userland process (typically via :c:func:`!free_pgtables`) extreme care must
+ be taken to ensure this is done safely, as this logic finally frees all page
+ tables in the specified range, ignoring existing leaf entries (it assumes the
+ caller has both zapped the range and prevented any further faults or
+ modifications within it).
+
+.. note:: Modifying mappings for reclaim or migration is performed under rmap
+ lock as it, like zapping, does not fundamentally modify the identity
+ of what is being mapped.
+
+**Traversing** and **zapping** ranges can be performed holding any one of the
+locks described in the terminology section above - that is the mmap lock, the
+VMA lock or either of the reverse mapping locks.
+
+That is - as long as you keep the relevant VMA **stable** - you are good to go
+ahead and perform these operations on page tables (though internally, kernel
+operations that perform writes also acquire internal page table locks to
+serialise - see the page table implementation detail section for more details).
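+
+For example, zapping a range of a file from every mapping of it (as is done on
+truncation) only needs the VMAs to be kept stable; a minimal sketch, where
+:c:member:`!inode`, :c:member:`!holebegin` and :c:member:`!holelen` are assumed
+context:
+
+.. code-block:: c
+
+  /* Clear leaf entries for the byte range [holebegin, holebegin + holelen)
+   * in all mappings of the file, leaving the page tables themselves alone. */
+  unmap_mapping_range(inode->i_mapping, holebegin, holelen, 0);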
+
+When **installing** page table entries, the mmap or VMA lock must be held to
+keep the VMA stable. We explore why this is in the page table locking details
+section below.
+
+.. warning:: Page tables are normally only traversed in regions covered by VMAs.
+ If you want to traverse page tables in areas that might not be
+ covered by VMAs, heavier locking is required.
+ See :c:func:`!walk_page_range_novma` for details.
+
+**Freeing** page tables is an entirely internal memory management operation and
+has special requirements (see the page freeing section below for more details).
+
+.. warning:: When **freeing** page tables, it must not be possible for VMAs
+ containing the ranges those page tables map to be accessible via
+ the reverse mapping.
+
+ The :c:func:`!free_pgtables` function removes the relevant VMAs
+ from the reverse mappings, but no other VMAs can be permitted to be
+ accessible and span the specified range.
+
+Lock ordering
+-------------
+
+As we have multiple locks across the kernel which may or may not be taken at the
+same time as explicit mm or VMA locks, we have to be wary of lock inversion, and
+the **order** in which locks are acquired and released becomes very important.
+
+.. note:: Lock inversion occurs when two threads need to acquire multiple locks,
+ but in doing so inadvertently cause a mutual deadlock.
+
+ For example, consider thread 1 which holds lock A and tries to acquire lock B,
+ while thread 2 holds lock B and tries to acquire lock A.
+
+ Both threads are now deadlocked on each other. However, had they attempted to
+ acquire locks in the same order, one would have waited for the other to
+ complete its work and no deadlock would have occurred.
+
+The opening comment in :c:macro:`!mm/rmap.c` describes in detail the required
+ordering of locks within memory management code:
+
+.. code-block::
+
+ inode->i_rwsem (while writing or truncating, not reading or faulting)
+ mm->mmap_lock
+ mapping->invalidate_lock (in filemap_fault)
+ folio_lock
+ hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
+ vma_start_write
+ mapping->i_mmap_rwsem
+ anon_vma->rwsem
+ mm->page_table_lock or pte_lock
+ swap_lock (in swap_duplicate, swap_info_get)
+ mmlist_lock (in mmput, drain_mmlist and others)
+ mapping->private_lock (in block_dirty_folio)
+ i_pages lock (widely used)
+ lruvec->lru_lock (in folio_lruvec_lock_irq)
+ inode->i_lock (in set_page_dirty's __mark_inode_dirty)
+ bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
+ sb_lock (within inode_lock in fs/fs-writeback.c)
+ i_pages lock (widely used, in set_page_dirty,
+ in arch-dependent flush_dcache_mmap_lock,
+ within bdi.wb->list_lock in __sync_single_inode)
+
+There is also a file-system specific lock ordering comment located at the top of
+:c:macro:`!mm/filemap.c`:
+
+.. code-block::
+
+ ->i_mmap_rwsem (truncate_pagecache)
+ ->private_lock (__free_pte->block_dirty_folio)
+ ->swap_lock (exclusive_swap_page, others)
+ ->i_pages lock
+
+ ->i_rwsem
+ ->invalidate_lock (acquired by fs in truncate path)
+ ->i_mmap_rwsem (truncate->unmap_mapping_range)
+
+ ->mmap_lock
+ ->i_mmap_rwsem
+ ->page_table_lock or pte_lock (various, mainly in memory.c)
+ ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
+
+ ->mmap_lock
+ ->invalidate_lock (filemap_fault)
+ ->lock_page (filemap_fault, access_process_vm)
+
+ ->i_rwsem (generic_perform_write)
+ ->mmap_lock (fault_in_readable->do_page_fault)
+
+ bdi->wb.list_lock
+ sb_lock (fs/fs-writeback.c)
+ ->i_pages lock (__sync_single_inode)
+
+ ->i_mmap_rwsem
+ ->anon_vma.lock (vma_merge)
+
+ ->anon_vma.lock
+ ->page_table_lock or pte_lock (anon_vma_prepare and various)
+
+ ->page_table_lock or pte_lock
+ ->swap_lock (try_to_unmap_one)
+ ->private_lock (try_to_unmap_one)
+ ->i_pages lock (try_to_unmap_one)
+ ->lruvec->lru_lock (follow_page_mask->mark_page_accessed)
+ ->lruvec->lru_lock (check_pte_range->folio_isolate_lru)
+ ->private_lock (folio_remove_rmap_pte->set_page_dirty)
+ ->i_pages lock (folio_remove_rmap_pte->set_page_dirty)
+ bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty)
+ ->inode->i_lock (folio_remove_rmap_pte->set_page_dirty)
+ bdi.wb->list_lock (zap_pte_range->set_page_dirty)
+ ->inode->i_lock (zap_pte_range->set_page_dirty)
+ ->private_lock (zap_pte_range->block_dirty_folio)
+
+Please check the current state of these comments, which may have changed since
+the time this document was written.
+
+------------------------------
+Locking Implementation Details
+------------------------------
+
+.. warning:: Locking rules for PTE-level page tables are very different from
+ locking rules for page tables at other levels.
+
+Page table locking details
+--------------------------
+
+In addition to the locks described in the terminology section above, we have
+additional locks dedicated to page tables:
+
+* **Higher level page table locks** - Higher level page tables, that is PGD, P4D
+ and PUD each make use of the process address space granularity
+ :c:member:`!mm->page_table_lock` lock when modified.
+
+* **Fine-grained page table locks** - PMDs and PTEs each have fine-grained locks
+ either kept within the folios describing the page tables or allocated
+  separately and pointed at by the folios if :c:macro:`!ALLOC_SPLIT_PTLOCKS` is
+ set. The PMD spin lock is obtained via :c:func:`!pmd_lock`, however PTEs are
+ mapped into higher memory (if a 32-bit system) and carefully locked via
+ :c:func:`!pte_offset_map_lock`.
+
+These locks represent the minimum required to interact with each page table
+level, but there are further requirements.
+
+Importantly, note that on a **traversal** of page tables, sometimes no such
+locks are taken. However, at the PTE level, at least concurrent page table
+deletion must be prevented (using RCU) and the page table must be mapped into
+high memory; see below.
+
+Whether care is taken on reading the page table entries depends on the
+architecture; see the section on atomicity below.
+
+Locking rules
+^^^^^^^^^^^^^
+
+We establish basic locking rules when interacting with page tables:
+
+* When changing a page table entry the page table lock for that page table
+ **must** be held, except if you can safely assume nobody can access the page
+ tables concurrently (such as on invocation of :c:func:`!free_pgtables`).
+* Reads from and writes to page table entries must be *appropriately*
+ atomic. See the section on atomicity below for details.
+* Populating previously empty entries requires that the mmap or VMA locks are
+ held (read or write), doing so with only rmap locks would be dangerous (see
+ the warning below).
+* As mentioned previously, zapping can be performed while simply keeping the VMA
+ stable, that is holding any one of the mmap, VMA or rmap locks.
+
+.. warning:: Populating previously empty entries is dangerous as, when unmapping
+ VMAs, :c:func:`!vms_clear_ptes` has a window of time between
+ zapping (via :c:func:`!unmap_vmas`) and freeing page tables (via
+ :c:func:`!free_pgtables`), where the VMA is still visible in the
+ rmap tree. :c:func:`!free_pgtables` assumes that the zap has
+ already been performed and removes PTEs unconditionally (along with
+ all other page tables in the freed range), so installing new PTE
+ entries could leak memory and also cause other unexpected and
+ dangerous behaviour.
+
+There are additional rules applicable when moving page tables, which we discuss
+in the section on this topic below.
+
+PTE-level page tables are different from page tables at other levels, and there
+are extra requirements for accessing them:
+
+* On 32-bit architectures, they may be in high memory (meaning they need to be
+ mapped into kernel memory to be accessible).
+* When empty, they can be unlinked and RCU-freed while holding an mmap lock or
+ rmap lock for reading in combination with the PTE and PMD page table locks.
+ In particular, this happens in :c:func:`!retract_page_tables` when handling
+ :c:macro:`!MADV_COLLAPSE`.
+ So accessing PTE-level page tables requires at least holding an RCU read lock;
+ but that only suffices for readers that can tolerate racing with concurrent
+ page table updates such that an empty PTE is observed (in a page table that
+ has actually already been detached and marked for RCU freeing) while another
+ new page table has been installed in the same location and filled with
+ entries. Writers normally need to take the PTE lock and revalidate that the
+ PMD entry still refers to the same PTE-level page table.
+
+To access PTE-level page tables, a helper like :c:func:`!pte_offset_map_lock` or
+:c:func:`!pte_offset_map` can be used depending on stability requirements.
+These map the page table into kernel memory if required, take the RCU lock, and
+depending on variant, may also look up or acquire the PTE lock.
+See the comment on :c:func:`!__pte_offset_map_lock`.
+
+Atomicity
+^^^^^^^^^
+
+Regardless of page table locks, the MMU hardware concurrently updates accessed
+and dirty bits (perhaps more, depending on architecture). Additionally, page
+table traversal operations can run in parallel (though holding the VMA stable),
+and functionality like GUP-fast locklessly traverses (that is, reads) page
+tables without keeping the VMA stable at all.
+
+When performing a page table traversal and keeping the VMA stable, whether a
+read must be performed once and only once or not depends on the architecture
+(for instance x86-64 does not require any special precautions).
+
+If a write is being performed, or if a read informs whether a write takes place
+(on an installation of a page table entry say, for instance in
+:c:func:`!__pud_install`), special care must always be taken. In these cases we
+can never assume that page table locks give us entirely exclusive access, and
+must retrieve page table entries once and only once.
+
+If we are reading page table entries, then we need only ensure that the compiler
+does not rearrange our loads. This is achieved via :c:func:`!pXXp_get`
+functions - :c:func:`!pgdp_get`, :c:func:`!p4dp_get`, :c:func:`!pudp_get`,
+:c:func:`!pmdp_get`, and :c:func:`!ptep_get`.
+
+Each of these uses :c:func:`!READ_ONCE` to guarantee that the compiler reads
+the page table entry only once.
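+
+A minimal sketch of a read under these rules (the helper name is illustrative;
+the PTE is assumed to be mapped and the VMA kept stable by the caller):
+
+.. code-block:: c
+
+  /* Read the entry exactly once via ptep_get() and act only on that value,
+   * never dereferencing ptep a second time. */
+  static bool example_pte_is_present(pte_t *ptep)
+  {
+          pte_t pte = ptep_get(ptep);
+
+          return pte_present(pte);
+  }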
+
+However, if we wish to manipulate an existing page table entry and care about
+the previously stored data, we must go further and use a hardware atomic
+operation as, for example, in :c:func:`!ptep_get_and_clear`.
+
+Equally, operations that do not rely on the VMA being held stable, such as
+GUP-fast (see :c:func:`!gup_fast` and its various page table level handlers like
+:c:func:`!gup_fast_pte_range`), must very carefully interact with page table
+entries, using functions such as :c:func:`!ptep_get_lockless` and equivalent for
+higher level page table levels.
+
+Writes to page table entries must also be appropriately atomic, as established
+by :c:func:`!set_pXX` functions - :c:func:`!set_pgd`, :c:func:`!set_p4d`,
+:c:func:`!set_pud`, :c:func:`!set_pmd`, and :c:func:`!set_pte`.
+
+Equally, functions which clear page table entries must be appropriately atomic,
+as in :c:func:`!pXX_clear` functions - :c:func:`!pgd_clear`,
+:c:func:`!p4d_clear`, :c:func:`!pud_clear`, :c:func:`!pmd_clear`, and
+:c:func:`!pte_clear`.
+
+Page table installation
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Page table installation is performed with the VMA held stable explicitly by an
+mmap or VMA lock in read or write mode (see the warning in the locking rules
+section for details as to why).
+
+When allocating a P4D, PUD or PMD and setting the relevant entry in the above
+PGD, P4D or PUD, the :c:member:`!mm->page_table_lock` must be held. This is
+acquired in :c:func:`!__p4d_alloc`, :c:func:`!__pud_alloc` and
+:c:func:`!__pmd_alloc` respectively.
+
+.. note:: :c:func:`!__pmd_alloc` actually invokes :c:func:`!pud_lock` and
+   :c:func:`!pud_lockptr` in turn; however, at the time of writing it ultimately
+ references the :c:member:`!mm->page_table_lock`.
+
+Allocating a PTE will either use the :c:member:`!mm->page_table_lock` or, if
+:c:macro:`!USE_SPLIT_PMD_PTLOCKS` is defined, a lock embedded in the PMD
+physical page metadata in the form of a :c:struct:`!struct ptdesc`, acquired by
+:c:func:`!pmd_ptdesc` called from :c:func:`!pmd_lock` and ultimately
+:c:func:`!__pte_alloc`.
+
+Finally, modifying the contents of the PTE requires special treatment, as the
+PTE page table lock must be acquired whenever we want stable and exclusive
+access to entries contained within a PTE, especially when we wish to modify
+them.
+
+This is performed via :c:func:`!pte_offset_map_lock` which carefully checks to
+ensure that the PTE hasn't changed from under us, ultimately invoking
+:c:func:`!pte_lockptr` to obtain a spin lock at PTE granularity contained within
+the :c:struct:`!struct ptdesc` associated with the physical PTE page. The lock
+must be released via :c:func:`!pte_unmap_unlock`.
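+
+The usual shape of a PTE modification, sketched minimally (the enclosing
+function and the update performed are hypothetical):
+
+.. code-block:: c
+
+  spinlock_t *ptl;
+  pte_t *pte;
+
+  /* Maps the PTE page (if in high memory), enters an RCU critical section,
+   * takes the PTE spinlock and revalidates the PMD entry. */
+  pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+  if (pte) {
+          /* ... examine or modify the entry with exclusive access ... */
+          pte_unmap_unlock(pte, ptl);
+  }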
+
+.. note:: There are some variants on this, such as
+ :c:func:`!pte_offset_map_rw_nolock` when we know we hold the PTE stable but
+ for brevity we do not explore this. See the comment for
+ :c:func:`!__pte_offset_map_lock` for more details.
+
+When modifying data in ranges we typically only wish to allocate higher page
+tables as necessary, using these locks to avoid races or overwriting anything,
+and set/clear data at the PTE level as required (for instance when page faulting
+or zapping).
+
+A typical pattern taken when traversing page table entries to install a new
+mapping is to optimistically determine whether the page table entry in the table
+above is empty, if so, only then acquiring the page table lock and checking
+again to see if it was allocated underneath us.
+
+This allows for a traversal with page table locks only being taken when
+required. An example of this is :c:func:`!__pud_alloc`.
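+
+The shape of this pattern, sketched loosely after :c:func:`!__pud_alloc` (this
+is a simplification, not the function's actual body):
+
+.. code-block:: c
+
+  /* Optimistically check for an empty entry before taking the lock. */
+  if (p4d_none(*p4d)) {
+          pud_t *new = pud_alloc_one(mm, addr);
+
+          if (!new)
+                  return -ENOMEM;
+
+          spin_lock(&mm->page_table_lock);
+          if (p4d_none(*p4d))
+                  p4d_populate(mm, p4d, new);     /* still empty: install */
+          else
+                  pud_free(mm, new);              /* raced: discard ours */
+          spin_unlock(&mm->page_table_lock);
+  }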
+
+At the leaf page table, that is the PTE, we can't entirely rely on this pattern
+as we have separate PMD and PTE locks and a THP collapse for instance might have
+eliminated the PMD entry as well as the PTE from under us.
+
+This is why :c:func:`!__pte_offset_map_lock` locklessly retrieves the PMD entry
+for the PTE, carefully checking it is as expected, before acquiring the
+PTE-specific lock, and then *again* checking that the PMD entry is as expected.
+
+If a THP collapse (or similar) were to occur then the lock on both pages would
+be acquired, so we can ensure this is prevented while the PTE lock is held.
+
+Installing entries this way ensures mutual exclusion on write.
+
+Page table freeing
+^^^^^^^^^^^^^^^^^^
+
+Tearing down page tables themselves is something that requires significant
+care. There must be no way that page tables designated for removal can be
+traversed or referenced by concurrent tasks.
+
+It is insufficient to simply hold an mmap write lock and VMA lock (which will
+prevent racing faults and rmap operations), as a file-backed mapping can be
+truncated under the :c:struct:`!struct address_space->i_mmap_rwsem` alone.
+
+As a result, no VMA which can be accessed via the reverse mapping (either
+through the :c:struct:`!struct anon_vma->rb_root` or the :c:member:`!struct
+address_space->i_mmap` interval trees) can have its page tables torn down.
+
+The operation is typically performed via :c:func:`!free_pgtables`, which assumes
+either the mmap write lock has been taken (as specified by its
+:c:member:`!mm_wr_locked` parameter), or that the VMA is already unreachable.
+
+It carefully removes the VMA from all reverse mappings; however, it is
+important that no new VMAs overlap these ranges and that no other route remains
+by which addresses within the range whose page tables are being torn down can
+be accessed.
+
+Additionally, it assumes that a zap has already been performed and steps have
+been taken to ensure that no further page table entries can be installed between
+the zap and the invocation of :c:func:`!free_pgtables`.
+
+Since it is assumed that all such steps have been taken, page table entries are
+cleared without page table locks (in the :c:func:`!pgd_clear`, :c:func:`!p4d_clear`,
+:c:func:`!pud_clear`, and :c:func:`!pmd_clear` functions).
+
+.. note:: It is possible for leaf page tables to be torn down independently of
+          the page tables above them, as is done by
+ :c:func:`!retract_page_tables`, which is performed under the i_mmap
+ read lock, PMD, and PTE page table locks, without this level of care.
+
+Page table moving
+^^^^^^^^^^^^^^^^^
+
+Some functions manipulate page table levels above PMD (that is PUD, P4D and PGD
+page tables). Most notable of these is :c:func:`!mremap`, which is capable of
+moving higher level page tables.
+
+In these instances, it is required that **all** locks are taken, that is
+the mmap lock, the VMA lock and the relevant rmap locks.
+
+You can observe this in the :c:func:`!mremap` implementation in the functions
+:c:func:`!take_rmap_locks` and :c:func:`!drop_rmap_locks` which perform the rmap
+side of lock acquisition, invoked ultimately by :c:func:`!move_page_tables`.
+
+VMA lock internals
+------------------
+
+Overview
+^^^^^^^^
+
+VMA read locking is entirely optimistic - if the lock is contended or a competing
+write has started, then we do not obtain a read lock.
+
+A VMA **read** lock is obtained by :c:func:`!lock_vma_under_rcu`, which first
+calls :c:func:`!rcu_read_lock` to ensure that the VMA is looked up in an RCU
+critical section, then attempts to VMA lock it via :c:func:`!vma_start_read`,
+before releasing the RCU lock via :c:func:`!rcu_read_unlock`.
+
+VMA read locks hold the read lock on the :c:member:`!vma->vm_lock` semaphore for
+their duration and the caller of :c:func:`!lock_vma_under_rcu` must release it
+via :c:func:`!vma_end_read`.
+
+VMA **write** locks are acquired via :c:func:`!vma_start_write` in instances
+where a VMA is about to be modified; unlike :c:func:`!vma_start_read`, the lock
+is always acquired. An mmap write lock **must** be held for the duration of the
+VMA write lock; releasing or downgrading the mmap write lock also releases the
+VMA write lock, so there is no :c:func:`!vma_end_write` function.
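+
+The write-side pattern is therefore, in sketch form (the modification itself is
+hypothetical):
+
+.. code-block:: c
+
+  mmap_write_lock(mm);
+  vma_start_write(vma);           /* mark this VMA write-locked */
+  /* ... modify VMA fields ... */
+  mmap_write_unlock(mm);          /* releases all VMA write locks too */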
+
+Note that a semaphore write lock is not held across a VMA lock. Rather, a
+sequence number is used for serialisation, and the write semaphore is only
+acquired at the point of write lock to update this.
+
+This ensures the semantics we require - VMA write locks provide exclusive write
+access to the VMA.
+
+Implementation details
+^^^^^^^^^^^^^^^^^^^^^^
+
+The VMA lock mechanism is designed to be a lightweight means of avoiding the use
+of the heavily contended mmap lock. It is implemented using a combination of a
+read/write semaphore and sequence numbers belonging to the containing
+:c:struct:`!struct mm_struct` and the VMA.
+
+Read locks are acquired via :c:func:`!vma_start_read`, which is an optimistic
+operation, i.e. it tries to acquire a read lock but returns false if it is
+unable to do so. At the end of the read operation, :c:func:`!vma_end_read` is
+called to release the VMA read lock.
+
+Invoking :c:func:`!vma_start_read` requires that :c:func:`!rcu_read_lock` has
+been called first, establishing that we are in an RCU critical section upon VMA
+read lock acquisition. Once acquired, the RCU lock can be released as it is only
+required for lookup. This is abstracted by :c:func:`!lock_vma_under_rcu` which
+is the interface a user should use.
+
+Writing requires the mmap to be write-locked and the VMA lock to be acquired via
+:c:func:`!vma_start_write`; however, the write lock is released by the
+termination or downgrade of the mmap write lock, so no :c:func:`!vma_end_write`
+is required.
+
+All this is achieved by the use of per-mm and per-VMA sequence counts, which are
+used in order to reduce complexity, especially for operations which write-lock
+multiple VMAs at once.
+
+If the mm sequence count, :c:member:`!mm->mm_lock_seq`, is equal to the VMA
+sequence count, :c:member:`!vma->vm_lock_seq`, then the VMA is write-locked. If
+they differ, then it is not.
+
+Each time the mmap write lock is released in :c:func:`!mmap_write_unlock` or
+:c:func:`!mmap_write_downgrade`, :c:func:`!vma_end_write_all` is invoked which
+also increments :c:member:`!mm->mm_lock_seq` via
+:c:func:`!mm_lock_seqcount_end`.
+
+This way, we ensure that, regardless of the VMA's sequence number, a write lock
+is never incorrectly indicated and that when we release an mmap write lock we
+efficiently release **all** VMA write locks contained within the mmap at the
+same time.
+
+Since the mmap write lock is exclusive against others who hold it, the automatic
+release of any VMA locks on its release makes sense, as you would never want to
+keep VMAs locked across entirely separate write operations. It also maintains
+correct lock ordering.
+
+Each time a VMA read lock is acquired, we acquire a read lock on the
+:c:member:`!vma->vm_lock` read/write semaphore and hold it, while checking that
+the sequence count of the VMA does not match that of the mm.
+
+If it does, the read lock fails. If it does not, we hold the lock, excluding
+writers, but permitting other readers, who will also obtain this lock under RCU.
+
+Importantly, maple tree operations performed in :c:func:`!lock_vma_under_rcu`
+are also RCU safe, so the whole read lock operation is guaranteed to function
+correctly.
+
+On the write side, we acquire a write lock on the :c:member:`!vma->vm_lock`
+read/write semaphore, before setting the VMA's sequence number under this lock,
+also simultaneously holding the mmap write lock.
+
+This way, if any read locks are in effect, :c:func:`!vma_start_write` will sleep
+until these are finished and mutual exclusion is achieved.
+
+After setting the VMA's sequence number, the lock is released, avoiding
+complexity with a long-term held write lock.
+
+This clever combination of a read/write semaphore and sequence count allows for
+fast RCU-based per-VMA lock acquisition (especially on page fault, though
+utilised elsewhere) with minimal complexity around lock ordering.
+
+mmap write lock downgrading
+---------------------------
+
+When an mmap write lock is held, one has exclusive access to resources within the
+mmap (with the usual caveats about requiring VMA write locks to avoid races with
+tasks holding VMA read locks).
+
+It is then possible to **downgrade** from a write lock to a read lock via
+:c:func:`!mmap_write_downgrade` which, similar to :c:func:`!mmap_write_unlock`,
+implicitly terminates all VMA write locks via :c:func:`!vma_end_write_all`, but
+importantly does not relinquish the mmap lock while downgrading, therefore
+keeping the locked virtual address space stable.
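+
+In sketch form (the work performed at each stage is hypothetical):
+
+.. code-block:: c
+
+  mmap_write_lock(mm);
+  /* ... modifications requiring exclusive access ... */
+  mmap_write_downgrade(mm);       /* VMA write locks end here */
+  /* ... read-only work; the address space layout stays stable ... */
+  mmap_read_unlock(mm);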
+
+An interesting consequence of this is that downgraded locks are exclusive
+against any other task possessing a downgraded lock (since a racing task would
+have to acquire a write lock first to downgrade it, and the downgraded lock
+prevents a new write lock from being obtained until the original lock is
+released).
+
+For clarity, we map read (R)/downgraded write (D)/write (W) locks against one
+another showing which locks exclude the others:
+
+.. list-table:: Lock exclusivity
+ :widths: 5 5 5 5
+ :header-rows: 1
+ :stub-columns: 1
+
+ * -
+ - R
+ - D
+ - W
+ * - R
+ - N
+ - N
+ - Y
+ * - D
+ - N
+ - Y
+ - Y
+ * - W
+ - Y
+ - Y
+ - Y
+
+Here a Y indicates the locks in the matching row/column are mutually exclusive,
+and N indicates that they are not.
+
+Stack expansion
+---------------
+
+Stack expansion throws up additional complexities in that we cannot permit
+racing page faults; as a result, we invoke :c:func:`!vma_start_write` to
+prevent them in :c:func:`!expand_downwards` or :c:func:`!expand_upwards`.
diff --git a/Documentation/netlink/specs/mptcp_pm.yaml b/Documentation/netlink/specs/mptcp_pm.yaml
index dc190bf838fe..dfd017780d2f 100644
--- a/Documentation/netlink/specs/mptcp_pm.yaml
+++ b/Documentation/netlink/specs/mptcp_pm.yaml
@@ -22,65 +22,67 @@ definitions:
doc: unused event
-
name: created
- doc:
- token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
+ doc: >-
A new MPTCP connection has been created. It is the good time to
allocate memory and send ADD_ADDR if needed. Depending on the
traffic-patterns it can take a long time until the
MPTCP_EVENT_ESTABLISHED is sent.
+ Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+ dport, server-side.
-
name: established
- doc:
- token, family, saddr4 | saddr6, daddr4 | daddr6, sport, dport
+ doc: >-
A MPTCP connection is established (can start new subflows).
+ Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+ dport, server-side.
-
name: closed
- doc:
- token
+ doc: >-
A MPTCP connection has stopped.
+ Attribute: token.
-
name: announced
value: 6
- doc:
- token, rem_id, family, daddr4 | daddr6 [, dport]
+ doc: >-
A new address has been announced by the peer.
+ Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
-
name: removed
- doc:
- token, rem_id
+ doc: >-
An address has been lost by the peer.
+ Attributes: token, rem_id.
-
name: sub-established
value: 10
- doc:
- token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
- dport, backup, if_idx [, error]
+ doc: >-
A new subflow has been established. 'error' should not be set.
+ Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+ daddr6, sport, dport, backup, if_idx [, error].
-
name: sub-closed
- doc:
- token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
- dport, backup, if_idx [, error]
+ doc: >-
A subflow has been closed. An error (copy of sk_err) could be set if an
error has been detected for this subflow.
+ Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+ daddr6, sport, dport, backup, if_idx [, error].
-
name: sub-priority
value: 13
- doc:
- token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 | daddr6, sport,
- dport, backup, if_idx [, error]
+ doc: >-
The priority of a subflow has changed. 'error' should not be set.
+ Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+ daddr6, sport, dport, backup, if_idx [, error].
-
name: listener-created
value: 15
- doc:
- family, sport, saddr4 | saddr6
+ doc: >-
A new PM listener is created.
+ Attributes: family, sport, saddr4 | saddr6.
-
name: listener-closed
- doc:
- family, sport, saddr4 | saddr6
+ doc: >-
A PM listener is closed.
+ Attributes: family, sport, saddr4 | saddr6.
attribute-sets:
-
@@ -306,8 +308,8 @@ operations:
attributes:
- addr
-
- name: flush-addrs
- doc: flush addresses
+ name: flush-addrs
+ doc: Flush addresses
attribute-set: endpoint
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
@@ -351,7 +353,7 @@ operations:
- addr-remote
-
name: announce
- doc: announce new sf
+ doc: Announce new address
attribute-set: attr
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
@@ -362,7 +364,7 @@ operations:
- token
-
name: remove
- doc: announce removal
+ doc: Announce removal
attribute-set: attr
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
@@ -373,7 +375,7 @@ operations:
- loc-id
-
name: subflow-create
- doc: todo
+ doc: Create subflow
attribute-set: attr
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
@@ -385,7 +387,7 @@ operations:
- addr-remote
-
name: subflow-destroy
- doc: todo
+ doc: Destroy subflow
attribute-set: attr
dont-validate: [ strict ]
flags: [ uns-admin-perm ]
diff --git a/MAINTAINERS b/MAINTAINERS
index 9eb4094af8a3..545ebac63f51 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1797,7 +1797,6 @@ F: include/uapi/linux/if_arcnet.h
ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
M: Arnd Bergmann <arnd@arndb.de>
-M: Olof Johansson <olof@lixom.net>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: soc@lists.linux.dev
S: Maintained
@@ -3608,6 +3607,7 @@ F: drivers/phy/qualcomm/phy-ath79-usb.c
ATHEROS ATH GENERIC UTILITIES
M: Kalle Valo <kvalo@kernel.org>
+M: Jeff Johnson <jjohnson@kernel.org>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/ath/*
@@ -5502,8 +5502,8 @@ L: patches@opensource.cirrus.com
S: Supported
W: https://github.com/CirrusLogic/linux-drivers/wiki
T: git https://github.com/CirrusLogic/linux-drivers.git
-F: drivers/firmware/cirrus/*
-F: include/linux/firmware/cirrus/*
+F: drivers/firmware/cirrus/
+F: include/linux/firmware/cirrus/
CIRRUS LOGIC EP93XX ETHERNET DRIVER
M: Hartley Sweeten <hsweeten@visionengravers.com>
@@ -7348,7 +7348,7 @@ F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Karol Herbst <kherbst@redhat.com>
M: Lyude Paul <lyude@redhat.com>
-M: Danilo Krummrich <dakr@redhat.com>
+M: Danilo Krummrich <dakr@kernel.org>
L: dri-devel@lists.freedesktop.org
L: nouveau@lists.freedesktop.org
S: Supported
@@ -8454,7 +8454,7 @@ F: include/video/s1d13xxxfb.h
EROFS FILE SYSTEM
M: Gao Xiang <xiang@kernel.org>
M: Chao Yu <chao@kernel.org>
-R: Yue Hu <huyue2@coolpad.com>
+R: Yue Hu <zbestahu@gmail.com>
R: Jeffle Xu <jefflexu@linux.alibaba.com>
R: Sandeep Dhavale <dhavale@google.com>
L: linux-erofs@lists.ozlabs.org
@@ -8925,7 +8925,7 @@ F: include/linux/arm_ffa.h
FIRMWARE LOADER (request_firmware)
M: Luis Chamberlain <mcgrof@kernel.org>
M: Russ Weight <russ.weight@linux.dev>
-M: Danilo Krummrich <dakr@redhat.com>
+M: Danilo Krummrich <dakr@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/firmware_class/
@@ -14759,7 +14759,7 @@ F: drivers/memory/mtk-smi.c
F: include/soc/mediatek/smi.h
MEDIATEK SWITCH DRIVER
-M: Arınç ÜNAL <arinc.unal@arinc9.com>
+M: Chester A. Unal <chester.a.unal@arinc9.com>
M: Daniel Golle <daniel@makrotopia.org>
M: DENG Qingfang <dqfext@gmail.com>
M: Sean Wang <sean.wang@mediatek.com>
@@ -18463,7 +18463,7 @@ F: Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
F: drivers/pinctrl/mediatek/
PIN CONTROLLER - MEDIATEK MIPS
-M: Arınç ÜNAL <arinc.unal@arinc9.com>
+M: Chester A. Unal <chester.a.unal@arinc9.com>
M: Sergio Paracuellos <sergio.paracuellos@gmail.com>
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
L: linux-mips@vger.kernel.org
@@ -19507,7 +19507,7 @@ S: Maintained
F: arch/mips/ralink
RALINK MT7621 MIPS ARCHITECTURE
-M: Arınç ÜNAL <arinc.unal@arinc9.com>
+M: Chester A. Unal <chester.a.unal@arinc9.com>
M: Sergio Paracuellos <sergio.paracuellos@gmail.com>
L: linux-mips@vger.kernel.org
S: Maintained
@@ -20910,6 +20910,8 @@ F: kernel/sched/
SCHEDULER - SCHED_EXT
R: Tejun Heo <tj@kernel.org>
R: David Vernet <void@manifault.com>
+R: Andrea Righi <arighi@nvidia.com>
+R: Changwoo Min <changwoo@igalia.com>
L: linux-kernel@vger.kernel.org
S: Maintained
W: https://github.com/sched-ext/scx
diff --git a/Makefile b/Makefile
index e5b8a8832c0c..7904d5d88088 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index ea5a1dcb133b..4f2eeda907ec 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -6,6 +6,7 @@
config ARC
def_bool y
select ARC_TIMERS
+ select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DMA_PREP_COHERENT
diff --git a/arch/arc/include/asm/cachetype.h b/arch/arc/include/asm/cachetype.h
new file mode 100644
index 000000000000..acd3b6cb4bf5
--- /dev/null
+++ b/arch/arc/include/asm/cachetype.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ARC_CACHETYPE_H
+#define __ASM_ARC_CACHETYPE_H
+
+#define cpu_dcache_is_aliasing() false
+#define cpu_icache_is_aliasing() true
+
+#endif
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index e4fe059cd861..dc47b2312127 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -6,6 +6,7 @@ menuconfig ARCH_MXC
select CLKSRC_IMX_GPT
select GENERIC_IRQ_CHIP
select GPIOLIB
+ select PINCTRL
select PM_OPP if PM
select SOC_BUS
select SRAM
diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
index 19973ab4ea6b..9e10d7a6b5a2 100644
--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
@@ -233,7 +233,7 @@
#interrupt-cells = <0x1>;
compatible = "pci-host-ecam-generic";
device_type = "pci";
- bus-range = <0x0 0x1>;
+ bus-range = <0x0 0xff>;
reg = <0x0 0x40000000 0x0 0x10000000>;
ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>;
interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
index 6e5a984c1d4e..26a29e5e5078 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
@@ -67,7 +67,7 @@
l2_cache_l0: l2-cache-l0 {
compatible = "cache";
cache-size = <0x80000>;
- cache-line-size = <128>;
+ cache-line-size = <64>;
cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
cache-level = <2>;
cache-unified;
@@ -91,7 +91,7 @@
l2_cache_l1: l2-cache-l1 {
compatible = "cache";
cache-size = <0x80000>;
- cache-line-size = <128>;
+ cache-line-size = <64>;
cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
cache-level = <2>;
cache-unified;
@@ -115,7 +115,7 @@
l2_cache_l2: l2-cache-l2 {
compatible = "cache";
cache-size = <0x80000>;
- cache-line-size = <128>;
+ cache-line-size = <64>;
cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
cache-level = <2>;
cache-unified;
@@ -139,7 +139,7 @@
l2_cache_l3: l2-cache-l3 {
compatible = "cache";
cache-size = <0x80000>;
- cache-line-size = <128>;
+ cache-line-size = <64>;
cache-sets = <1024>; //512KiB(size)/64(line-size)=8192ways/8-way set
cache-level = <2>;
cache-unified;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 37e24f1bd227..99ea26d400ff 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -36,15 +36,8 @@
#include <asm/traps.h>
#include <asm/vdso.h>
-#ifdef CONFIG_ARM64_GCS
#define GCS_SIGNAL_CAP(addr) (((unsigned long)addr) & GCS_CAP_ADDR_MASK)
-static bool gcs_signal_cap_valid(u64 addr, u64 val)
-{
- return val == GCS_SIGNAL_CAP(addr);
-}
-#endif
-
/*
* Do a signal return; undo the signal stack. These are aligned to 128-bit.
*/
@@ -1062,8 +1055,7 @@ static int restore_sigframe(struct pt_regs *regs,
#ifdef CONFIG_ARM64_GCS
static int gcs_restore_signal(void)
{
- unsigned long __user *gcspr_el0;
- u64 cap;
+ u64 gcspr_el0, cap;
int ret;
if (!system_supports_gcs())
@@ -1072,7 +1064,7 @@ static int gcs_restore_signal(void)
if (!(current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE))
return 0;
- gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);
+ gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
/*
* Ensure that any changes to the GCS done via GCS operations
@@ -1087,22 +1079,23 @@ static int gcs_restore_signal(void)
* then faults will be generated on GCS operations - the main
* concern is to protect GCS pages.
*/
- ret = copy_from_user(&cap, gcspr_el0, sizeof(cap));
+ ret = copy_from_user(&cap, (unsigned long __user *)gcspr_el0,
+ sizeof(cap));
if (ret)
return -EFAULT;
/*
* Check that the cap is the actual GCS before replacing it.
*/
- if (!gcs_signal_cap_valid((u64)gcspr_el0, cap))
+ if (cap != GCS_SIGNAL_CAP(gcspr_el0))
return -EINVAL;
/* Invalidate the token to prevent reuse */
- put_user_gcs(0, (__user void*)gcspr_el0, &ret);
+ put_user_gcs(0, (unsigned long __user *)gcspr_el0, &ret);
if (ret != 0)
return -EFAULT;
- write_sysreg_s(gcspr_el0 + 1, SYS_GCSPR_EL0);
+ write_sysreg_s(gcspr_el0 + 8, SYS_GCSPR_EL0);
return 0;
}
@@ -1421,7 +1414,7 @@ static int get_sigframe(struct rt_sigframe_user_layout *user,
static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
{
- unsigned long __user *gcspr_el0;
+ u64 gcspr_el0;
int ret = 0;
if (!system_supports_gcs())
@@ -1434,18 +1427,20 @@ static int gcs_signal_entry(__sigrestore_t sigtramp, struct ksignal *ksig)
* We are entering a signal handler, current register state is
* active.
*/
- gcspr_el0 = (unsigned long __user *)read_sysreg_s(SYS_GCSPR_EL0);
+ gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0);
/*
* Push a cap and the GCS entry for the trampoline onto the GCS.
*/
- put_user_gcs((unsigned long)sigtramp, gcspr_el0 - 2, &ret);
- put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 1), gcspr_el0 - 1, &ret);
+ put_user_gcs((unsigned long)sigtramp,
+ (unsigned long __user *)(gcspr_el0 - 16), &ret);
+ put_user_gcs(GCS_SIGNAL_CAP(gcspr_el0 - 8),
+ (unsigned long __user *)(gcspr_el0 - 8), &ret);
if (ret != 0)
return ret;
- gcspr_el0 -= 2;
- write_sysreg_s((unsigned long)gcspr_el0, SYS_GCSPR_EL0);
+ gcspr_el0 -= 16;
+ write_sysreg_s(gcspr_el0, SYS_GCSPR_EL0);
return 0;
}
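With gcspr_el0 now held as a plain u64 instead of an unsigned long __user *, the offsets in gcs_restore_signal() and gcs_signal_entry() change from element counts to byte counts: each guarded-control-stack entry is 8 bytes, so stepping back two entries becomes -16 and advancing one entry becomes +8. A small user-space sketch (assuming 8-byte entries, as on arm64) showing that both spellings address the same slots:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t stack[4] = { 0 };          /* stand-in shadow stack        */
	uint64_t *top = &stack[4];          /* stand-in for GCSPR_EL0       */
	uintptr_t gcspr = (uintptr_t)top;

	/* old form: pointer arithmetic in 8-byte elements */
	uintptr_t tramp_old = (uintptr_t)(top - 2);
	uintptr_t cap_old   = (uintptr_t)(top - 1);

	/* new form: byte arithmetic on a plain integer */
	uintptr_t tramp_new = gcspr - 16;
	uintptr_t cap_new   = gcspr - 8;

	assert(tramp_old == tramp_new && cap_old == cap_new);
	printf("tramp slot %#lx, cap slot %#lx\n",
	       (unsigned long)tramp_new, (unsigned long)cap_new);
	return 0;
}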
diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile
index 92d005958dfb..ff172cbe5881 100644
--- a/arch/hexagon/Makefile
+++ b/arch/hexagon/Makefile
@@ -32,3 +32,9 @@ KBUILD_LDFLAGS += $(ldflags-y)
TIR_NAME := r19
KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
+
+# Disable HexagonConstExtenders pass for LLVM versions prior to 19.1.0
+# https://github.com/llvm/llvm-project/issues/99714
+ifneq ($(call clang-min-version, 190100),y)
+KBUILD_CFLAGS += -mllvm -hexagon-cext=false
+endif
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 338849c430a5..7b1e8f9128e9 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -143,11 +143,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
" DIV:\t\t%s\n"
" BMX:\t\t%s\n"
" CDX:\t\t%s\n",
- cpuinfo.has_mul ? "yes" : "no",
- cpuinfo.has_mulx ? "yes" : "no",
- cpuinfo.has_div ? "yes" : "no",
- cpuinfo.has_bmx ? "yes" : "no",
- cpuinfo.has_cdx ? "yes" : "no");
+ str_yes_no(cpuinfo.has_mul),
+ str_yes_no(cpuinfo.has_mulx),
+ str_yes_no(cpuinfo.has_div),
+ str_yes_no(cpuinfo.has_bmx),
+ str_yes_no(cpuinfo.has_cdx));
seq_printf(m,
"Icache:\t\t%ukB, line length: %u\n",
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 57ded82c2840..e8b3f67bf3f5 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -208,6 +208,7 @@ CONFIG_FB_ATY=y
CONFIG_FB_ATY_CT=y
CONFIG_FB_ATY_GX=y
CONFIG_FB_3DFX=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 4d77e17541e9..ca0c90e95837 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -716,6 +716,7 @@ CONFIG_FB_TRIDENT=m
CONFIG_FB_SM501=m
CONFIG_FB_IBM_GXT4500=y
CONFIG_LCD_PLATFORM=m
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
index f381b177ea06..0b6365d85d11 100644
--- a/arch/powerpc/platforms/book3s/vas-api.c
+++ b/arch/powerpc/platforms/book3s/vas-api.c
@@ -464,7 +464,43 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+/*
+ * During mmap() paste address, mapping VMA is saved in VAS window
+ * struct which is used to unmap during migration if the window is
+ * still open. But the user space can remove this mapping with
+ * munmap() before closing the window and the VMA address will
+ * be invalid. Set VAS window VMA to NULL in this function which
+ * is called before VMA free.
+ */
+static void vas_mmap_close(struct vm_area_struct *vma)
+{
+ struct file *fp = vma->vm_file;
+ struct coproc_instance *cp_inst = fp->private_data;
+ struct vas_window *txwin;
+
+ /* Should not happen */
+ if (!cp_inst || !cp_inst->txwin) {
+ pr_err("No attached VAS window for the paste address mmap\n");
+ return;
+ }
+
+ txwin = cp_inst->txwin;
+ /*
+ * task_ref.vma is set in coproc_mmap() during mmap paste
+ * address. So it has to be the same VMA that is getting freed.
+ */
+ if (WARN_ON(txwin->task_ref.vma != vma)) {
+ pr_err("Invalid paste address mmaping\n");
+ return;
+ }
+
+ mutex_lock(&txwin->task_ref.mmap_mutex);
+ txwin->task_ref.vma = NULL;
+ mutex_unlock(&txwin->task_ref.mmap_mutex);
+}
+
static const struct vm_operations_struct vas_vm_ops = {
+ .close = vas_mmap_close,
.fault = vas_mmap_fault,
};
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index abe6e6c0ab98..6087d38c7235 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -234,6 +234,8 @@ static unsigned long get_vmem_size(unsigned long identity_size,
vsize = round_up(SZ_2G + max_mappable, rte_size) +
round_up(vmemmap_size, rte_size) +
FIXMAP_SIZE + MODULES_LEN + KASLR_LEN;
+ if (IS_ENABLED(CONFIG_KMSAN))
+ vsize += MODULES_LEN * 2;
return size_add(vsize, vmalloc_size);
}
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 145035f84a0e..3fa28db2fe59 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -306,7 +306,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
pages++;
}
}
- if (mode == POPULATE_DIRECT)
+ if (mode == POPULATE_IDENTITY)
update_page_count(PG_DIRECT_MAP_4K, pages);
}
@@ -339,7 +339,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
}
pgtable_pte_populate(pmd, addr, next, mode);
}
- if (mode == POPULATE_DIRECT)
+ if (mode == POPULATE_IDENTITY)
update_page_count(PG_DIRECT_MAP_1M, pages);
}
@@ -372,7 +372,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
}
pgtable_pmd_populate(pud, addr, next, mode);
}
- if (mode == POPULATE_DIRECT)
+ if (mode == POPULATE_IDENTITY)
update_page_count(PG_DIRECT_MAP_2G, pages);
}
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index edbb52ce3f1e..7d12a1305fc9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -270,7 +270,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
if (len >= sizeof(_value)) \
return -E2BIG; \
len = strscpy(_value, buf, sizeof(_value)); \
- if (len < 0) \
+ if ((ssize_t)len < 0) \
return len; \
strim(_value); \
return len; \
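The cast matters because the sysfs store callback's len parameter is a size_t: strscpy() reports overflow as a negative ssize_t, but once that value lands in an unsigned variable a plain "len < 0" can never be true. A user-space sketch of the pitfall (generic C, not the s390 macro; compilers typically warn that the unsigned comparison is always false):

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	size_t len = (size_t)-7;     /* e.g. -E2BIG returned by strscpy()   */

	/* The unsigned comparison silently swallows the error ...          */
	printf("len < 0          -> %d\n", len < 0 ? 1 : 0);
	/* ... casting back to a signed type restores the check.            */
	printf("(ssize_t)len < 0 -> %d\n", (ssize_t)len < 0 ? 1 : 0);
	return 0;
}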
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 2e1e26846050..99c590da0ae2 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -429,6 +429,16 @@ static struct event_constraint intel_lnc_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+static struct extra_reg intel_lnc_extra_regs[] __read_mostly = {
+ INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0xfffffffffffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0xfffffffffffull, RSP_1),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
+ INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
+ INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0xf, FE),
+ INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
+ EVENT_EXTRA_END
+};
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
@@ -6422,7 +6432,7 @@ static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
intel_pmu_init_glc(pmu);
hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
- hybrid(pmu, extra_regs) = intel_rwc_extra_regs;
+ hybrid(pmu, extra_regs) = intel_lnc_extra_regs;
}
static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 1a4b326ca2ce..6ba6549f26fa 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -2517,6 +2517,7 @@ void __init intel_ds_init(void)
x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
break;
+ case 6:
case 5:
x86_pmu.pebs_ept = 1;
fallthrough;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index d98fac567684..e7aba7349231 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1910,6 +1910,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &adl_uncore_init),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &gnr_uncore_init),
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &gnr_uncore_init),
+ X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &gnr_uncore_init),
{},
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 17b6590748c0..645aa360628d 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -452,6 +452,7 @@
#define X86_FEATURE_SME_COHERENT (19*32+10) /* AMD hardware-enforced cache coherency */
#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
#define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */
+#define X86_FEATURE_HV_INUSE_WR_ALLOWED (19*32+30) /* Allow Write to in-use hypervisor-owned pages */
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c0975815980c..20e6009381ed 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -230,6 +230,8 @@ static inline unsigned long long l1tf_pfn_limit(void)
return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
+void init_cpu_devs(void);
+void get_cpu_vendor(struct cpuinfo_x86 *c);
extern void early_cpu_init(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
index 125c407e2abe..41502bd2afd6 100644
--- a/arch/x86/include/asm/static_call.h
+++ b/arch/x86/include/asm/static_call.h
@@ -65,4 +65,19 @@
extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
+extern void __static_call_update_early(void *tramp, void *func);
+
+#define static_call_update_early(name, _func) \
+({ \
+ typeof(&STATIC_CALL_TRAMP(name)) __F = (_func); \
+ if (static_call_initialized) { \
+ __static_call_update(&STATIC_CALL_KEY(name), \
+ STATIC_CALL_TRAMP_ADDR(name), __F);\
+ } else { \
+ WRITE_ONCE(STATIC_CALL_KEY(name).func, _func); \
+ __static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
+ __F); \
+ } \
+})
+
#endif /* _ASM_STATIC_CALL_H */
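static_call_update_early() exists for call sites that run before the static-call infrastructure is initialized: in that window the key's func pointer is written directly and the trampoline is patched via __static_call_update_early(), while later callers fall through to the normal __static_call_update() path. A rough user-space analogue of that early/late split, using a plain function pointer in place of the trampoline (illustrative names only, not kernel APIs; in this flattened analogue both branches collapse to a pointer store, whereas the kernel paths differ in how the trampoline is patched):

#include <stdbool.h>
#include <stdio.h>

static bool initialized;            /* stands in for static_call_initialized */
static void (*hypercall)(void);     /* stands in for the trampoline target   */

static void variant_a(void) { puts("variant A"); }
static void variant_b(void) { puts("variant B"); }

/* normal path: the full update machinery is available */
static void update(void (*func)(void))
{
	hypercall = func;
}

/* early path: write the target directly, no machinery yet */
static void update_early(void (*func)(void))
{
	hypercall = func;
}

static void update_call(void (*func)(void))
{
	if (initialized)
		update(func);
	else
		update_early(func);
}

int main(void)
{
	update_call(variant_a);     /* early boot: direct write   */
	hypercall();
	initialized = true;
	update_call(variant_b);     /* later: regular update path */
	hypercall();
	return 0;
}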
diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
index ab7382f92aff..96bda43538ee 100644
--- a/arch/x86/include/asm/sync_core.h
+++ b/arch/x86/include/asm/sync_core.h
@@ -8,7 +8,7 @@
#include <asm/special_insns.h>
#ifdef CONFIG_X86_32
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
{
asm volatile (
"pushfl\n\t"
@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
: ASM_CALL_CONSTRAINT : : "memory");
}
#else
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
{
unsigned int tmp;
@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
* Like all of Linux's memory ordering operations, this is a
* compiler barrier as well.
*/
-static inline void sync_core(void)
+static __always_inline void sync_core(void)
{
/*
* The SERIALIZE instruction is the most straightforward way to
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index a2dd24947eb8..97771b9d33af 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -39,9 +39,11 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pgtable.h>
+#include <linux/instrumentation.h>
#include <trace/events/xen.h>
+#include <asm/alternative.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/nospec-branch.h>
@@ -86,11 +88,20 @@ struct xen_dm_op_buf;
* there aren't more than 5 arguments...)
*/
-extern struct { char _entry[32]; } hypercall_page[];
+void xen_hypercall_func(void);
+DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);
-#define __HYPERCALL "call hypercall_page+%c[offset]"
-#define __HYPERCALL_ENTRY(x) \
- [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
+#ifdef MODULE
+#define __ADDRESSABLE_xen_hypercall
+#else
+#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall)
+#endif
+
+#define __HYPERCALL \
+ __ADDRESSABLE_xen_hypercall \
+ "call __SCT__xen_hypercall"
+
+#define __HYPERCALL_ENTRY(x) "a" (x)
#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG "eax"
@@ -148,7 +159,7 @@ extern struct { char _entry[32]; } hypercall_page[];
__HYPERCALL_0ARG(); \
asm volatile (__HYPERCALL \
: __HYPERCALL_0PARAM \
- : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
: __HYPERCALL_CLOBBER0); \
(type)__res; \
})
@@ -159,7 +170,7 @@ extern struct { char _entry[32]; } hypercall_page[];
__HYPERCALL_1ARG(a1); \
asm volatile (__HYPERCALL \
: __HYPERCALL_1PARAM \
- : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
: __HYPERCALL_CLOBBER1); \
(type)__res; \
})
@@ -170,7 +181,7 @@ extern struct { char _entry[32]; } hypercall_page[];
__HYPERCALL_2ARG(a1, a2); \
asm volatile (__HYPERCALL \
: __HYPERCALL_2PARAM \
- : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
: __HYPERCALL_CLOBBER2); \
(type)__res; \
})
@@ -181,7 +192,7 @@ extern struct { char _entry[32]; } hypercall_page[];
__HYPERCALL_3ARG(a1, a2, a3); \
asm volatile (__HYPERCALL \
: __HYPERCALL_3PARAM \
- : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
: __HYPERCALL_CLOBBER3); \
(type)__res; \
})
@@ -192,7 +203,7 @@ extern struct { char _entry[32]; } hypercall_page[];
__HYPERCALL_4ARG(a1, a2, a3, a4); \
asm volatile (__HYPERCALL \
: __HYPERCALL_4PARAM \
- : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
: __HYPERCALL_CLOBBER4); \
(type)__res; \
})
@@ -206,12 +217,9 @@ xen_single_call(unsigned int call,
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
- if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
- return -EINVAL;
-
- asm volatile(CALL_NOSPEC
+ asm volatile(__HYPERCALL
: __HYPERCALL_5PARAM
- : [thunk_target] "a" (&hypercall_page[call])
+ : __HYPERCALL_ENTRY(call)
: __HYPERCALL_CLOBBER5);
return (long)__res;
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index 465647456753..f17d16607882 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -143,11 +143,6 @@ static bool skip_addr(void *dest)
dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
return true;
#endif
-#ifdef CONFIG_XEN
- if (dest >= (void *)hypercall_page &&
- dest < (void*)hypercall_page + PAGE_SIZE)
- return true;
-#endif
return false;
}
diff --git a/arch/x86/kernel/cet.c b/arch/x86/kernel/cet.c
index d2c732a34e5d..303bf74d175b 100644
--- a/arch/x86/kernel/cet.c
+++ b/arch/x86/kernel/cet.c
@@ -81,6 +81,34 @@ static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
static __ro_after_init bool ibt_fatal = true;
+/*
+ * By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR.
+ *
+ * For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered,
+ * the WFE state of the interrupted context needs to be cleared to let execution
+ * continue. Otherwise when the CPU resumes from the instruction that just
+ * caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU
+ * enters a dead loop.
+ *
+ * This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't
+ * set WFE. But FRED provides space on the entry stack (in an expanded CS area)
+ * to save and restore the WFE state, thus the WFE state is no longer clobbered,
+ * so software must clear it.
+ */
+static void ibt_clear_fred_wfe(struct pt_regs *regs)
+{
+ /*
+ * No need to do any FRED checks.
+ *
+ * For IDT event delivery, the high-order 48 bits of CS are pushed
+ * as 0s into the stack, and later IRET ignores these bits.
+ *
+ * For FRED, a test to check if fred_cs.wfe is set would be dropped
+ * by compilers.
+ */
+ regs->fred_cs.wfe = 0;
+}
+
static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
if ((error_code & CP_EC) != CP_ENDBR) {
@@ -90,6 +118,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
regs->ax = 0;
+ ibt_clear_fred_wfe(regs);
return;
}
@@ -97,6 +126,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
if (!ibt_fatal) {
printk(KERN_DEFAULT CUT_HERE);
__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
+ ibt_clear_fred_wfe(regs);
return;
}
BUG();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a5c28975c608..3e9037690814 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -867,7 +867,7 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}
-static void get_cpu_vendor(struct cpuinfo_x86 *c)
+void get_cpu_vendor(struct cpuinfo_x86 *c)
{
char *v = c->x86_vendor_id;
int i;
@@ -1649,15 +1649,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
detect_nopl();
}
-void __init early_cpu_init(void)
+void __init init_cpu_devs(void)
{
const struct cpu_dev *const *cdev;
int count = 0;
-#ifdef CONFIG_PROCESSOR_SELECT
- pr_info("KERNEL supported cpus:\n");
-#endif
-
for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
const struct cpu_dev *cpudev = *cdev;
@@ -1665,20 +1661,30 @@ void __init early_cpu_init(void)
break;
cpu_devs[count] = cpudev;
count++;
+ }
+}
+void __init early_cpu_init(void)
+{
#ifdef CONFIG_PROCESSOR_SELECT
- {
- unsigned int j;
-
- for (j = 0; j < 2; j++) {
- if (!cpudev->c_ident[j])
- continue;
- pr_info(" %s %s\n", cpudev->c_vendor,
- cpudev->c_ident[j]);
- }
- }
+ unsigned int i, j;
+
+ pr_info("KERNEL supported cpus:\n");
#endif
+
+ init_cpu_devs();
+
+#ifdef CONFIG_PROCESSOR_SELECT
+ for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) {
+ for (j = 0; j < 2; j++) {
+ if (!cpu_devs[i]->c_ident[j])
+ continue;
+ pr_info(" %s %s\n", cpu_devs[i]->c_vendor,
+ cpu_devs[i]->c_ident[j]);
+ }
}
+#endif
+
early_identify_cpu(&boot_cpu_data);
}
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index d18078834ded..dc12fe5ef3ca 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -223,6 +223,63 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
hyperv_cleanup();
}
#endif /* CONFIG_CRASH_DUMP */
+
+static u64 hv_ref_counter_at_suspend;
+static void (*old_save_sched_clock_state)(void);
+static void (*old_restore_sched_clock_state)(void);
+
+/*
+ * Hyper-V clock counter resets during hibernation. Save and restore clock
+ * offset during suspend/resume, while also considering the time passed
+ * before suspend. This is to make sure that sched_clock using hv tsc page
+ * based clocksource, proceeds from where it left off during suspend and
+ * it shows correct time for the timestamps of kernel messages after resume.
+ */
+static void save_hv_clock_tsc_state(void)
+{
+ hv_ref_counter_at_suspend = hv_read_reference_counter();
+}
+
+static void restore_hv_clock_tsc_state(void)
+{
+ /*
+ * Adjust the offsets used by hv tsc clocksource to
+ * account for the time spent before hibernation.
+ * adjusted value = reference counter (time) at suspend
+ * - reference counter (time) now.
+ */
+ hv_adj_sched_clock_offset(hv_ref_counter_at_suspend - hv_read_reference_counter());
+}
+
+/*
+ * Functions to override save_sched_clock_state and restore_sched_clock_state
+ * functions of x86_platform. The Hyper-V clock counter is reset during
+ * suspend-resume and the offset used to measure time needs to be
+ * corrected, post resume.
+ */
+static void hv_save_sched_clock_state(void)
+{
+ old_save_sched_clock_state();
+ save_hv_clock_tsc_state();
+}
+
+static void hv_restore_sched_clock_state(void)
+{
+ restore_hv_clock_tsc_state();
+ old_restore_sched_clock_state();
+}
+
+static void __init x86_setup_ops_for_tsc_pg_clock(void)
+{
+ if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
+ return;
+
+ old_save_sched_clock_state = x86_platform.save_sched_clock_state;
+ x86_platform.save_sched_clock_state = hv_save_sched_clock_state;
+
+ old_restore_sched_clock_state = x86_platform.restore_sched_clock_state;
+ x86_platform.restore_sched_clock_state = hv_restore_sched_clock_state;
+}
#endif /* CONFIG_HYPERV */
static uint32_t __init ms_hyperv_platform(void)
@@ -579,6 +636,7 @@ static void __init ms_hyperv_init_platform(void)
/* Register Hyper-V specific clocksource */
hv_init_clocksource();
+ x86_setup_ops_for_tsc_pg_clock();
hv_vtl_init_platform();
#endif
/*
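restore_hv_clock_tsc_state() feeds the difference "reference counter at suspend minus reference counter now" into hv_adj_sched_clock_offset(), which subtracts it from hv_sched_clock_offset (see the hyperv_timer.c hunk further below). The net effect is that sched_clock resumes from its pre-suspend value even though the counter restarted, and the adjusted offset may wrap, which is why the clocksource side notes it can hold negative values. A minimal sketch of that u64 arithmetic with made-up numbers (the conversion to nanoseconds is ignored here):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset      = 1000;   /* hv_sched_clock_offset at boot      */
	uint64_t ref_suspend = 5000;   /* reference counter at suspend       */
	uint64_t ref_resume  = 40;     /* counter restarted after resume     */

	uint64_t clock_before = ref_suspend - offset;        /* 4000         */

	/* restore_hv_clock_tsc_state(): offset -= (suspend - now) */
	offset -= ref_suspend - ref_resume;   /* wraps, i.e. "negative"      */

	uint64_t clock_after = ref_resume - offset;
	assert(clock_after == clock_before);
	printf("sched_clock resumes at %llu\n",
	       (unsigned long long)clock_after);
	return 0;
}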
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 4eefaac64c6c..9eed0c144dad 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -172,6 +172,15 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);
+noinstr void __static_call_update_early(void *tramp, void *func)
+{
+ BUG_ON(system_state != SYSTEM_BOOTING);
+ BUG_ON(!early_boot_irqs_disabled);
+ BUG_ON(static_call_initialized);
+ __text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE);
+ sync_core();
+}
+
#ifdef CONFIG_MITIGATION_RETHUNK
/*
* This is called by apply_returns() to fix up static call trampolines,
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index fab3ac9a4574..6a17396c8174 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -519,14 +519,10 @@ INIT_PER_CPU(irq_stack_backing_store);
* linker will never mark as relocatable. (Using just ABSOLUTE() is not
* sufficient for that).
*/
-#ifdef CONFIG_XEN
#ifdef CONFIG_XEN_PV
xen_elfnote_entry_value =
ABSOLUTE(xen_elfnote_entry) + ABSOLUTE(startup_xen);
#endif
-xen_elfnote_hypercall_page_value =
- ABSOLUTE(xen_elfnote_hypercall_page) + ABSOLUTE(hypercall_page);
-#endif
#ifdef CONFIG_PVH
xen_elfnote_phys32_entry_value =
ABSOLUTE(xen_elfnote_phys32_entry) + ABSOLUTE(pvh_start_xen - LOAD_OFFSET);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 22e7ad235123..2401606db260 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3364,18 +3364,6 @@ static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
return true;
}
-static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
-{
- if (fault->exec)
- return is_executable_pte(spte);
-
- if (fault->write)
- return is_writable_pte(spte);
-
- /* Fault was on Read access */
- return spte & PT_PRESENT_MASK;
-}
-
/*
* Returns the last level spte pointer of the shadow page walk for the given
* gpa, and sets *spte to the spte value. This spte may be non-preset. If no
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index f332b33bc817..af10bc0380a3 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -462,6 +462,23 @@ static inline bool is_mmu_writable_spte(u64 spte)
}
/*
+ * Returns true if the access indicated by @fault is allowed by the existing
+ * SPTE protections. Note, the caller is responsible for checking that the
+ * SPTE is a shadow-present, leaf SPTE (either before or after).
+ */
+static inline bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
+{
+ if (fault->exec)
+ return is_executable_pte(spte);
+
+ if (fault->write)
+ return is_writable_pte(spte);
+
+ /* Fault was on Read access */
+ return spte & PT_PRESENT_MASK;
+}
+
+/*
* If the MMU-writable flag is cleared, i.e. the SPTE is write-protected for
* write-tracking, remote TLBs must be flushed, even if the SPTE was read-only,
* as KVM allows stale Writable TLB entries to exist. When dirty logging, KVM
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 4508d868f1cd..2f15e0e33903 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -985,6 +985,11 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
if (fault->prefetch && is_shadow_present_pte(iter->old_spte))
return RET_PF_SPURIOUS;
+ if (is_shadow_present_pte(iter->old_spte) &&
+ is_access_allowed(fault, iter->old_spte) &&
+ is_last_spte(iter->old_spte, iter->level))
+ return RET_PF_SPURIOUS;
+
if (unlikely(!fault->slot))
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
else
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 4b74ea91f4e6..65fd245a9953 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -1199,6 +1199,12 @@ bool avic_hardware_setup(void)
return false;
}
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
+ !boot_cpu_has(X86_FEATURE_HV_INUSE_WR_ALLOWED)) {
+ pr_warn("AVIC disabled: missing HvInUseWrAllowed on SNP-enabled system\n");
+ return false;
+ }
+
if (boot_cpu_has(X86_FEATURE_AVIC)) {
pr_info("AVIC enabled\n");
} else if (force_avic) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index dd15cc635655..21dacd312779 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3201,15 +3201,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
if (data & ~supported_de_cfg)
return 1;
- /*
- * Don't let the guest change the host-programmed value. The
- * MSR is very model specific, i.e. contains multiple bits that
- * are completely unknown to KVM, and the one bit known to KVM
- * is simply a reflection of hardware capabilities.
- */
- if (!msr->host_initiated && data != svm->msr_decfg)
- return 1;
-
svm->msr_decfg = data;
break;
}
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index 1715d2ab07be..ad9116a99bcc 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -2,7 +2,7 @@
#ifndef __KVM_X86_VMX_POSTED_INTR_H
#define __KVM_X86_VMX_POSTED_INTR_H
-#include <linux/find.h>
+#include <linux/bitmap.h>
#include <asm/posted_intr.h>
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c8160baf3838..c79a8cc57ba4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9976,7 +9976,7 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
{
u64 ret = vcpu->run->hypercall.ret;
- if (!is_64_bit_mode(vcpu))
+ if (!is_64_bit_hypercall(vcpu))
ret = (u32)ret;
kvm_rax_write(vcpu, ret);
++vcpu->stat.hypercalls;
@@ -12724,6 +12724,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_hv_init_vm(kvm);
kvm_xen_init_vm(kvm);
+ if (ignore_msrs && !report_ignored_msrs) {
+ pr_warn_once("Running KVM with ignore_msrs=1 and report_ignored_msrs=0 is not a\n"
+ "a supported configuration. Lying to the guest about the existence of MSRs\n"
+ "may cause the guest operating system to hang or produce errors. If a guest\n"
+ "does not run without ignore_msrs=1, please report it to kvm@vger.kernel.org.\n");
+ }
+
return 0;
out_uninit_mmu:
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 84e5adbd0925..43dcd8c7badc 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -2,6 +2,7 @@
#include <linux/console.h>
#include <linux/cpu.h>
+#include <linux/instrumentation.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
#include <linux/slab.h>
@@ -21,7 +22,8 @@
#include "xen-ops.h"
-EXPORT_SYMBOL_GPL(hypercall_page);
+DEFINE_STATIC_CALL(xen_hypercall, xen_hypercall_hvm);
+EXPORT_STATIC_CALL_TRAMP(xen_hypercall);
/*
* Pointer to the xen_vcpu_info structure or
@@ -68,6 +70,67 @@ EXPORT_SYMBOL(xen_start_flags);
*/
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
+static __ref void xen_get_vendor(void)
+{
+ init_cpu_devs();
+ cpu_detect(&boot_cpu_data);
+ get_cpu_vendor(&boot_cpu_data);
+}
+
+void xen_hypercall_setfunc(void)
+{
+ if (static_call_query(xen_hypercall) != xen_hypercall_hvm)
+ return;
+
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
+ static_call_update(xen_hypercall, xen_hypercall_amd);
+ else
+ static_call_update(xen_hypercall, xen_hypercall_intel);
+}
+
+/*
+ * Evaluate processor vendor in order to select the correct hypercall
+ * function for HVM/PVH guests.
+ * Might be called very early in boot before vendor has been set by
+ * early_cpu_init().
+ */
+noinstr void *__xen_hypercall_setfunc(void)
+{
+ void (*func)(void);
+
+ /*
+ * Xen is supported only on CPUs with CPUID, so testing for
+ * X86_FEATURE_CPUID is a test for early_cpu_init() having been
+ * run.
+ *
+ * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty
+ * dependency chain: it is being called via the xen_hypercall static
+ * call when running as a PVH or HVM guest. Hypercalls need to be
+ * noinstr due to PV guests using hypercalls in noinstr code. So we
+ * can safely tag the function body as "instrumentation ok", since
+ * the PV guest requirement is not of interest here (xen_get_vendor()
+ * calls noinstr functions, and static_call_update_early() might do
+ * so, too).
+ */
+ instrumentation_begin();
+
+ if (!boot_cpu_has(X86_FEATURE_CPUID))
+ xen_get_vendor();
+
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON))
+ func = xen_hypercall_amd;
+ else
+ func = xen_hypercall_intel;
+
+ static_call_update_early(xen_hypercall, func);
+
+ instrumentation_end();
+
+ return func;
+}
+
static int xen_cpu_up_online(unsigned int cpu)
{
xen_init_lock_cpu(cpu);
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 24d2957a4726..fe57ff85d004 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -106,15 +106,8 @@ static void __init init_hvm_pv_info(void)
/* PVH set up hypercall page in xen_prepare_pvh(). */
if (xen_pvh_domain())
pv_info.name = "Xen PVH";
- else {
- u64 pfn;
- uint32_t msr;
-
+ else
pv_info.name = "Xen HVM";
- msr = cpuid_ebx(base + 2);
- pfn = __pa(hypercall_page);
- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
- }
xen_setup_features();
@@ -300,6 +293,10 @@ static uint32_t __init xen_platform_hvm(void)
if (xen_pv_domain())
return 0;
+ /* Set correct hypercall function. */
+ if (xen_domain)
+ xen_hypercall_setfunc();
+
if (xen_pvh_domain() && nopv) {
/* Guest booting via the Xen-PVH boot entry goes here */
pr_info("\"nopv\" parameter is ignored in PVH guest\n");
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index d6818c6cafda..a8eb7e0c473c 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1341,6 +1341,9 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
xen_domain_type = XEN_PV_DOMAIN;
xen_start_flags = xen_start_info->flags;
+ /* Interrupts are guaranteed to be off initially. */
+ early_boot_irqs_disabled = true;
+ static_call_update_early(xen_hypercall, xen_hypercall_pv);
xen_setup_features();
@@ -1431,7 +1434,6 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
local_irq_disable();
- early_boot_irqs_disabled = true;
xen_raw_console_write("mapping kernel into physical memory\n");
xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index bf68c329fc01..0e3d930bcb89 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -129,17 +129,10 @@ static void __init pvh_arch_setup(void)
void __init xen_pvh_init(struct boot_params *boot_params)
{
- u32 msr;
- u64 pfn;
-
xen_pvh = 1;
xen_domain_type = XEN_HVM_DOMAIN;
xen_start_flags = pvh_start_info.flags;
- msr = cpuid_ebx(xen_cpuid_base() + 2);
- pfn = __pa(hypercall_page);
- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
-
x86_init.oem.arch_setup = pvh_arch_setup;
x86_init.oem.banner = xen_banner;
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 83189cf5cdce..b518f36d1ca2 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -20,10 +20,33 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <../entry/calling.h>
.pushsection .noinstr.text, "ax"
/*
+ * PV hypercall interface to the hypervisor.
+ *
+ * Called via inline asm(), so better preserve %rcx and %r11.
+ *
+ * Input:
+ * %eax: hypercall number
+ * %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
+ * Output: %rax
+ */
+SYM_FUNC_START(xen_hypercall_pv)
+ ANNOTATE_NOENDBR
+ push %rcx
+ push %r11
+ UNWIND_HINT_SAVE
+ syscall
+ UNWIND_HINT_RESTORE
+ pop %r11
+ pop %rcx
+ RET
+SYM_FUNC_END(xen_hypercall_pv)
+
+/*
* Disabling events is simply a matter of making the event mask
* non-zero.
*/
@@ -176,7 +199,6 @@ SYM_CODE_START(xen_early_idt_handler_array)
SYM_CODE_END(xen_early_idt_handler_array)
__FINIT
-hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
* Xen64 iret frame:
*
@@ -186,17 +208,28 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
* cs
* rip <-- standard iret frame
*
- * flags
+ * flags <-- xen_iret must push from here on
*
- * rcx }
- * r11 }<-- pushed by hypercall page
- * rsp->rax }
+ * rcx
+ * r11
+ * rsp->rax
*/
+.macro xen_hypercall_iret
+ pushq $0 /* Flags */
+ push %rcx
+ push %r11
+ push %rax
+ mov $__HYPERVISOR_iret, %eax
+ syscall /* Do the IRET. */
+#ifdef CONFIG_MITIGATION_SLS
+ int3
+#endif
+.endm
+
SYM_CODE_START(xen_iret)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
- pushq $0
- jmp hypercall_iret
+ xen_hypercall_iret
SYM_CODE_END(xen_iret)
/*
@@ -301,8 +334,7 @@ SYM_CODE_START(xen_entry_SYSENTER_compat)
ENDBR
lea 16(%rsp), %rsp /* strip %rcx, %r11 */
mov $-ENOSYS, %rax
- pushq $0
- jmp hypercall_iret
+ xen_hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 7f6c69dbb816..9252652afe59 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -6,9 +6,11 @@
#include <linux/elfnote.h>
#include <linux/init.h>
+#include <linux/instrumentation.h>
#include <asm/boot.h>
#include <asm/asm.h>
+#include <asm/frame.h>
#include <asm/msr.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
@@ -20,28 +22,6 @@
#include <xen/interface/xen-mca.h>
#include <asm/xen/interface.h>
-.pushsection .noinstr.text, "ax"
- .balign PAGE_SIZE
-SYM_CODE_START(hypercall_page)
- .rept (PAGE_SIZE / 32)
- UNWIND_HINT_FUNC
- ANNOTATE_NOENDBR
- ANNOTATE_UNRET_SAFE
- ret
- /*
- * Xen will write the hypercall page, and sort out ENDBR.
- */
- .skip 31, 0xcc
- .endr
-
-#define HYPERCALL(n) \
- .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \
- .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
-#include <asm/xen-hypercalls.h>
-#undef HYPERCALL
-SYM_CODE_END(hypercall_page)
-.popsection
-
#ifdef CONFIG_XEN_PV
__INIT
SYM_CODE_START(startup_xen)
@@ -87,6 +67,87 @@ SYM_CODE_END(xen_cpu_bringup_again)
#endif
#endif
+ .pushsection .noinstr.text, "ax"
+/*
+ * Xen hypercall interface to the hypervisor.
+ *
+ * Input:
+ * %eax: hypercall number
+ * 32-bit:
+ * %ebx, %ecx, %edx, %esi, %edi: args 1..5 for the hypercall
+ * 64-bit:
+ * %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
+ * Output: %[er]ax
+ */
+SYM_FUNC_START(xen_hypercall_hvm)
+ ENDBR
+ FRAME_BEGIN
+ /* Save all relevant registers (caller save and arguments). */
+#ifdef CONFIG_X86_32
+ push %eax
+ push %ebx
+ push %ecx
+ push %edx
+ push %esi
+ push %edi
+#else
+ push %rax
+ push %rcx
+ push %rdx
+ push %rdi
+ push %rsi
+ push %r11
+ push %r10
+ push %r9
+ push %r8
+#ifdef CONFIG_FRAME_POINTER
+ pushq $0 /* Dummy push for stack alignment. */
+#endif
+#endif
+ /* Set the vendor specific function. */
+ call __xen_hypercall_setfunc
+ /* Set ZF = 1 if AMD, Restore saved registers. */
+#ifdef CONFIG_X86_32
+ lea xen_hypercall_amd, %ebx
+ cmp %eax, %ebx
+ pop %edi
+ pop %esi
+ pop %edx
+ pop %ecx
+ pop %ebx
+ pop %eax
+#else
+ lea xen_hypercall_amd(%rip), %rbx
+ cmp %rax, %rbx
+#ifdef CONFIG_FRAME_POINTER
+ pop %rax /* Dummy pop. */
+#endif
+ pop %r8
+ pop %r9
+ pop %r10
+ pop %r11
+ pop %rsi
+ pop %rdi
+ pop %rdx
+ pop %rcx
+ pop %rax
+#endif
+ /* Use correct hypercall function. */
+ jz xen_hypercall_amd
+ jmp xen_hypercall_intel
+SYM_FUNC_END(xen_hypercall_hvm)
+
+SYM_FUNC_START(xen_hypercall_amd)
+ vmmcall
+ RET
+SYM_FUNC_END(xen_hypercall_amd)
+
+SYM_FUNC_START(xen_hypercall_intel)
+ vmcall
+ RET
+SYM_FUNC_END(xen_hypercall_intel)
+ .popsection
+
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
@@ -116,8 +177,6 @@ SYM_CODE_END(xen_cpu_bringup_again)
#else
# define FEATURES_DOM0 0
#endif
- ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .globl xen_elfnote_hypercall_page;
- xen_elfnote_hypercall_page: _ASM_PTR xen_elfnote_hypercall_page_value - .)
ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES,
.long FEATURES_PV | FEATURES_PVH | FEATURES_DOM0)
ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index e1b782e823e6..63c13a2ccf55 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -326,4 +326,13 @@ static inline void xen_smp_intr_free_pv(unsigned int cpu) {}
static inline void xen_smp_count_cpus(void) { }
#endif /* CONFIG_SMP */
+#ifdef CONFIG_XEN_PV
+void xen_hypercall_pv(void);
+#endif
+void xen_hypercall_hvm(void);
+void xen_hypercall_amd(void);
+void xen_hypercall_intel(void);
+void xen_hypercall_setfunc(void);
+void *__xen_hypercall_setfunc(void);
+
#endif /* XEN_OPS_H */
diff --git a/block/bdev.c b/block/bdev.c
index 738e3c8457e7..9d73a8fbf7f9 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -155,8 +155,7 @@ int set_blocksize(struct file *file, int size)
struct inode *inode = file->f_mapping->host;
struct block_device *bdev = I_BDEV(inode);
- /* Size must be a power of two, and between 512 and PAGE_SIZE */
- if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
+ if (blk_validate_block_size(size))
return -EINVAL;
/* Size cannot be smaller than the size supported by the device */
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index cd5ea6eaa76b..156e9bb07abf 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -275,13 +275,15 @@ void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
struct blk_mq_hw_ctx *hctx;
unsigned long i;
- lockdep_assert_held(&q->sysfs_dir_lock);
-
+ mutex_lock(&q->sysfs_dir_lock);
if (!q->mq_sysfs_init_done)
- return;
+ goto unlock;
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
+
+unlock:
+ mutex_unlock(&q->sysfs_dir_lock);
}
int blk_mq_sysfs_register_hctxs(struct request_queue *q)
@@ -290,10 +292,9 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
unsigned long i;
int ret = 0;
- lockdep_assert_held(&q->sysfs_dir_lock);
-
+ mutex_lock(&q->sysfs_dir_lock);
if (!q->mq_sysfs_init_done)
- return ret;
+ goto unlock;
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
@@ -301,5 +302,8 @@ int blk_mq_sysfs_register_hctxs(struct request_queue *q)
break;
}
+unlock:
+ mutex_unlock(&q->sysfs_dir_lock);
+
return ret;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6b6111513986..8ac19d4ae3c0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4412,6 +4412,15 @@ struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
}
EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
+/*
+ * Only hctx removed from cpuhp list can be reused
+ */
+static bool blk_mq_hctx_is_reusable(struct blk_mq_hw_ctx *hctx)
+{
+ return hlist_unhashed(&hctx->cpuhp_online) &&
+ hlist_unhashed(&hctx->cpuhp_dead);
+}
+
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
struct blk_mq_tag_set *set, struct request_queue *q,
int hctx_idx, int node)
@@ -4421,7 +4430,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
/* reuse dead hctx first */
spin_lock(&q->unused_hctx_lock);
list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
- if (tmp->numa_node == node) {
+ if (tmp->numa_node == node && blk_mq_hctx_is_reusable(tmp)) {
hctx = tmp;
break;
}
@@ -4453,8 +4462,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
unsigned long i, j;
/* protect against switching io scheduler */
- lockdep_assert_held(&q->sysfs_lock);
-
+ mutex_lock(&q->sysfs_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int old_node;
int node = blk_mq_get_hctx_node(set, i);
@@ -4487,6 +4495,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
xa_for_each_start(&q->hctx_table, j, hctx, j)
blk_mq_exit_hctx(q, set, hctx, j);
+ mutex_unlock(&q->sysfs_lock);
/* unregister cpuhp callbacks for exited hctxs */
blk_mq_remove_hw_queues_cpuhp(q);
@@ -4518,14 +4527,10 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
xa_init(&q->hctx_table);
- mutex_lock(&q->sysfs_lock);
-
blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
goto err_hctxs;
- mutex_unlock(&q->sysfs_lock);
-
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
@@ -4544,7 +4549,6 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
return 0;
err_hctxs:
- mutex_unlock(&q->sysfs_lock);
blk_mq_release(q);
err_exit:
q->mq_ops = NULL;
@@ -4925,12 +4929,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
return false;
/* q->elevator needs protection from ->sysfs_lock */
- lockdep_assert_held(&q->sysfs_lock);
+ mutex_lock(&q->sysfs_lock);
/* the check has to be done with holding sysfs_lock */
if (!q->elevator) {
kfree(qe);
- goto out;
+ goto unlock;
}
INIT_LIST_HEAD(&qe->node);
@@ -4940,7 +4944,9 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
__elevator_get(qe->type);
list_add(&qe->node, head);
elevator_disable(q);
-out:
+unlock:
+ mutex_unlock(&q->sysfs_lock);
+
return true;
}
@@ -4969,9 +4975,11 @@ static void blk_mq_elv_switch_back(struct list_head *head,
list_del(&qe->node);
kfree(qe);
+ mutex_lock(&q->sysfs_lock);
elevator_switch(q, t);
/* drop the reference acquired in blk_mq_elv_switch_none */
elevator_put(t);
+ mutex_unlock(&q->sysfs_lock);
}
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -4991,11 +4999,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
return;
- list_for_each_entry(q, &set->tag_list, tag_set_list) {
- mutex_lock(&q->sysfs_dir_lock);
- mutex_lock(&q->sysfs_lock);
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_freeze_queue(q);
- }
/*
* Switch IO scheduler to 'none', cleaning up the data associated
* with the previous scheduler. We will switch back once we are done
@@ -5051,11 +5056,8 @@ switch_back:
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_elv_switch_back(&head, q);
- list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
- mutex_unlock(&q->sysfs_lock);
- mutex_unlock(&q->sysfs_dir_lock);
- }
/* Free the excess tags when nr_hw_queues shrink. */
for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 64f70c713d2f..767598e719ab 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -706,11 +706,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
if (entry->load_module)
entry->load_module(disk, page, length);
- mutex_lock(&q->sysfs_lock);
blk_mq_freeze_queue(q);
+ mutex_lock(&q->sysfs_lock);
res = entry->store(disk, page, length);
- blk_mq_unfreeze_queue(q);
mutex_unlock(&q->sysfs_lock);
+ blk_mq_unfreeze_queue(q);
return res;
}
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index d8e97a760fbc..16178054e629 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -409,7 +409,7 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
mutex_lock(&bo->lock);
drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
- bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
+ bo, bo->ctx ? bo->ctx->id : 0, bo->vpu_addr, bo->base.base.size,
bo->flags, kref_read(&bo->base.base.refcount));
if (bo->base.pages)
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 891967a95bc3..0af614dfb6f9 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -612,18 +612,22 @@ int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
ret = -ENOMEM;
- goto unlock;
+ goto err_ctx_fini;
}
ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
if (ret) {
ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
- goto unlock;
+ goto err_ctx_fini;
}
-unlock:
mutex_unlock(&vdev->rctx.lock);
return ret;
+
+err_ctx_fini:
+ mutex_unlock(&vdev->rctx.lock);
+ ivpu_mmu_context_fini(vdev, &vdev->rctx);
+ return ret;
}
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index dbc0711e28d1..949f4233946c 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -378,6 +378,7 @@ void ivpu_pm_init(struct ivpu_device *vdev)
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, delay);
+ pm_runtime_set_active(dev);
ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
}
@@ -392,7 +393,6 @@ void ivpu_pm_enable(struct ivpu_device *vdev)
{
struct device *dev = vdev->drm.dev;
- pm_runtime_set_active(dev);
pm_runtime_allow(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d65cd08ba8e1..d81b55f5068c 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -135,10 +135,10 @@ config ACPI_REV_OVERRIDE_POSSIBLE
config ACPI_EC
bool "Embedded Controller"
depends on HAS_IOPORT
- default X86
+ default X86 || LOONGARCH
help
This driver handles communication with the microcontroller
- on many x86 laptops and other machines.
+ on many x86/LoongArch laptops and other machines.
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 21545ffba065..8934e6ad5772 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -489,7 +489,7 @@ config IMG_ASCII_LCD
config HT16K33
tristate "Holtek Ht16K33 LED controller with keyscan"
- depends on FB && I2C && INPUT
+ depends on FB && I2C && INPUT && BACKLIGHT_CLASS_DEVICE
select FB_SYSMEM_HELPERS
select INPUT_MATRIXKMAP
select FB_BACKLIGHT
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index d4aed12dd436..934ab9332c80 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1618,6 +1618,21 @@ static void ublk_unquiesce_dev(struct ublk_device *ub)
blk_mq_kick_requeue_list(ub->ub_disk->queue);
}
+static struct gendisk *ublk_detach_disk(struct ublk_device *ub)
+{
+ struct gendisk *disk;
+
+ /* Sync with ublk_abort_queue() by holding the lock */
+ spin_lock(&ub->lock);
+ disk = ub->ub_disk;
+ ub->dev_info.state = UBLK_S_DEV_DEAD;
+ ub->dev_info.ublksrv_pid = -1;
+ ub->ub_disk = NULL;
+ spin_unlock(&ub->lock);
+
+ return disk;
+}
+
static void ublk_stop_dev(struct ublk_device *ub)
{
struct gendisk *disk;
@@ -1631,14 +1646,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
ublk_unquiesce_dev(ub);
}
del_gendisk(ub->ub_disk);
-
- /* Sync with ublk_abort_queue() by holding the lock */
- spin_lock(&ub->lock);
- disk = ub->ub_disk;
- ub->dev_info.state = UBLK_S_DEV_DEAD;
- ub->dev_info.ublksrv_pid = -1;
- ub->ub_disk = NULL;
- spin_unlock(&ub->lock);
+ disk = ublk_detach_disk(ub);
put_disk(disk);
unlock:
mutex_unlock(&ub->mutex);
@@ -2336,7 +2344,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
out_put_cdev:
if (ret) {
- ub->dev_info.state = UBLK_S_DEV_DEAD;
+ ublk_detach_disk(ub);
ublk_put_device(ub);
}
if (ret)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 3dee026988dc..45df5eeabc5e 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -614,6 +614,12 @@ static ssize_t backing_dev_store(struct device *dev,
}
nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+ /* Refuse to use zero sized device (also prevents self reference) */
+ if (!nr_pages) {
+ err = -EINVAL;
+ goto out;
+ }
+
bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
if (!bitmap) {
@@ -1438,12 +1444,16 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
size_t num_pages = disksize >> PAGE_SHIFT;
size_t index;
+ if (!zram->table)
+ return;
+
/* Free all pages that are still in this zram device */
for (index = 0; index < num_pages; index++)
zram_free_page(zram, index);
zs_destroy_pool(zram->mem_pool);
vfree(zram->table);
+ zram->table = NULL;
}
static bool zram_meta_alloc(struct zram *zram, u64 disksize)
@@ -2320,11 +2330,6 @@ static void zram_reset_device(struct zram *zram)
zram->limit_pages = 0;
- if (!init_done(zram)) {
- up_write(&zram->init_lock);
- return;
- }
-
set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 6a99a459b80b..51745ed1bbab 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1106,7 +1106,7 @@ int open_for_data(struct cdrom_device_info *cdi)
}
}
- cd_dbg(CD_OPEN, "all seems well, opening the devicen");
+ cd_dbg(CD_OPEN, "all seems well, opening the device\n");
/* all seems well, we can open the device */
ret = cdo->open(cdi, 0); /* open for data */
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index b2cb157703c5..c409fc7e0618 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -278,7 +278,8 @@ static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
#else /* !CONFIG_RESET_CONTROLLER */
-static int clk_imx8mp_audiomix_reset_controller_register(struct clk_imx8mp_audiomix_priv *priv)
+static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+ struct clk_imx8mp_audiomix_priv *priv)
{
return 0;
}
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index 17e32ae08720..1015fab95251 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -779,6 +779,13 @@ static struct ccu_div dpu1_clk = {
},
};
+static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
+ &video_pll_clk.common.hw, 4, 1, 0);
+
+static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
+ { .hw = &emmc_sdio_ref_clk.hw },
+};
+
static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
@@ -798,7 +805,7 @@ static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", p
0x150, BIT(12), 0);
static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
-static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", video_pll_clk_pd, 0x204, BIT(30), 0);
+static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, BIT(30), 0);
static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
@@ -1059,6 +1066,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
return ret;
priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+ ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
+ if (ret)
+ return ret;
+
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
if (ret)
return ret;
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 99177835cade..b39dee7b93af 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -27,7 +27,8 @@
#include <asm/mshyperv.h>
static struct clock_event_device __percpu *hv_clock_event;
-static u64 hv_sched_clock_offset __ro_after_init;
+/* Note: offset can hold negative values after hibernation. */
+static u64 hv_sched_clock_offset __read_mostly;
/*
* If false, we're using the old mechanism for stimer0 interrupts
@@ -470,6 +471,17 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
+/*
+ * Called during resume from hibernation, from overridden
+ * x86_platform.restore_sched_clock_state routine. This is to adjust offsets
+ * used to calculate time for hv tsc page based sched_clock, to account for
+ * time spent before hibernation.
+ */
+void hv_adj_sched_clock_offset(u64 offset)
+{
+ hv_sched_clock_offset -= offset;
+}
+
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
static int hv_cs_enable(struct clocksource *cs)
{
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index d7630bab2516..66e5dfc711c0 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -374,15 +374,19 @@ static inline int amd_pstate_cppc_enable(bool enable)
static int msr_init_perf(struct amd_cpudata *cpudata)
{
- u64 cap1;
+ u64 cap1, numerator;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, AMD_CPPC_HIGHEST_PERF(cap1));
- WRITE_ONCE(cpudata->max_limit_perf, AMD_CPPC_HIGHEST_PERF(cap1));
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->highest_perf, numerator);
+ WRITE_ONCE(cpudata->max_limit_perf, numerator);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
@@ -394,13 +398,18 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
+ u64 numerator;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
- WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf);
+ ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->highest_perf, numerator);
+ WRITE_ONCE(cpudata->max_limit_perf, numerator);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
@@ -561,16 +570,13 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf;
+ u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf, max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
- if (cpudata->boost_supported && !policy->boost_enabled)
- max_perf = READ_ONCE(cpudata->nominal_perf);
- else
- max_perf = READ_ONCE(cpudata->highest_perf);
-
- max_limit_perf = div_u64(policy->max * max_perf, policy->cpuinfo.max_freq);
- min_limit_perf = div_u64(policy->min * max_perf, policy->cpuinfo.max_freq);
+ max_perf = READ_ONCE(cpudata->highest_perf);
+ max_freq = READ_ONCE(cpudata->max_freq);
+ max_limit_perf = div_u64(policy->max * max_perf, max_freq);
+ min_limit_perf = div_u64(policy->min * max_perf, max_freq);
lowest_perf = READ_ONCE(cpudata->lowest_perf);
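/*
 * Worked example of the new limit mapping, using hypothetical values:
 * with highest_perf = 200 and max_freq = 4000000 kHz, a policy->max of
 * 3000000 kHz yields max_limit_perf = 3000000 * 200 / 4000000 = 150,
 * i.e. the limits now scale against the true highest performance level
 * rather than a boost-dependent ceiling.
 */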
if (min_limit_perf < lowest_perf)
@@ -889,7 +895,6 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
int ret;
u32 min_freq, max_freq;
- u64 numerator;
u32 nominal_perf, nominal_freq;
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
u32 boost_ratio, lowest_nonlinear_ratio;
@@ -911,10 +916,7 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
nominal_perf = READ_ONCE(cpudata->nominal_perf);
- ret = amd_get_boost_ratio_numerator(cpudata->cpu, &numerator);
- if (ret)
- return ret;
- boost_ratio = div_u64(numerator << SCHED_CAPACITY_SHIFT, nominal_perf);
+ boost_ratio = div_u64(cpudata->highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
@@ -1869,18 +1871,18 @@ static int __init amd_pstate_init(void)
static_call_update(amd_pstate_update_perf, shmem_update_perf);
}
- ret = amd_pstate_register_driver(cppc_state);
- if (ret) {
- pr_err("failed to register with return %d\n", ret);
- return ret;
- }
-
if (amd_pstate_prefcore) {
ret = amd_detect_prefcore(&amd_pstate_prefcore);
if (ret)
return ret;
}
+ ret = amd_pstate_register_driver(cppc_state);
+ if (ret) {
+ pr_err("failed to register with return %d\n", ret);
+ return ret;
+ }
+
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
ret = sysfs_create_group(&dev_root->kobj, &amd_pstate_global_attr_group);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index d77899650798..b98b1ccffd1c 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -1295,6 +1295,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
struct cxl_region_params *p = &cxlr->params;
struct cxl_decoder *cxld = cxl_rr->decoder;
struct cxl_switch_decoder *cxlsd;
+ struct cxl_port *iter = port;
u16 eig, peig;
u8 eiw, peiw;
@@ -1311,16 +1312,26 @@ static int cxl_port_setup_targets(struct cxl_port *port,
cxlsd = to_cxl_switch_decoder(&cxld->dev);
if (cxl_rr->nr_targets_set) {
- int i, distance;
+ int i, distance = 1;
+ struct cxl_region_ref *cxl_rr_iter;
/*
- * Passthrough decoders impose no distance requirements between
- * peers
+ * The "distance" between peer downstream ports represents which
+ * endpoint positions in the region interleave a given port can
+ * host.
+ *
+ * For example, at the root of a hierarchy the distance is
+ * always 1 as every index targets a different host-bridge. At
+ * each subsequent switch level those ports map every Nth region
+		 * position, where N is the width of the switch, i.e. the distance.
*/
- if (cxl_rr->nr_targets == 1)
- distance = 0;
- else
- distance = p->nr_targets / cxl_rr->nr_targets;
+ do {
+ cxl_rr_iter = cxl_rr_load(iter, cxlr);
+ distance *= cxl_rr_iter->nr_targets;
+ iter = to_cxl_port(iter->dev.parent);
+ } while (!is_cxl_root(iter));
+ distance *= cxlrd->cxlsd.cxld.interleave_ways;
+
for (i = 0; i < cxl_rr->nr_targets_set; i++)
if (ep->dport == cxlsd->target[i]) {
rc = check_last_peer(cxled, ep, cxl_rr,
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 0241d1d7133a..6d94ff4a4f1a 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -836,6 +836,9 @@ static ssize_t rcd_pcie_cap_emit(struct device *dev, u16 offset, char *buf, size
if (!root_dev)
return -ENXIO;
+ if (!dport->regs.rcd_pcie_cap)
+ return -ENXIO;
+
guard(device)(root_dev);
if (!root_dev->driver)
return -ENXIO;
@@ -1032,8 +1035,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- rc = cxl_pci_ras_unmask(pdev);
- if (rc)
+ if (cxl_pci_ras_unmask(pdev))
dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");
pci_save_state(pdev);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 7eeee3a38202..5baa83b85515 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -60,7 +60,7 @@ static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
{
}
-static void __dma_buf_debugfs_list_del(struct file *file)
+static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
{
}
#endif
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 8ce1f074c2d3..cc7398cc17d6 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -297,7 +297,7 @@ static const struct dma_buf_ops udmabuf_ops = {
};
#define SEALS_WANTED (F_SEAL_SHRINK)
-#define SEALS_DENIED (F_SEAL_WRITE)
+#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)
static int check_memfd_seals(struct file *memfd)
{
@@ -317,12 +317,10 @@ static int check_memfd_seals(struct file *memfd)
return 0;
}
-static int export_udmabuf(struct udmabuf *ubuf,
- struct miscdevice *device,
- u32 flags)
+static struct dma_buf *export_udmabuf(struct udmabuf *ubuf,
+ struct miscdevice *device)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *buf;
ubuf->device = device;
exp_info.ops = &udmabuf_ops;
@@ -330,11 +328,7 @@ static int export_udmabuf(struct udmabuf *ubuf,
exp_info.priv = ubuf;
exp_info.flags = O_RDWR;
- buf = dma_buf_export(&exp_info);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- return dma_buf_fd(buf, flags);
+ return dma_buf_export(&exp_info);
}
static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
@@ -391,6 +385,7 @@ static long udmabuf_create(struct miscdevice *device,
struct folio **folios = NULL;
pgoff_t pgcnt = 0, pglimit;
struct udmabuf *ubuf;
+ struct dma_buf *dmabuf;
long ret = -EINVAL;
u32 i, flags;
@@ -436,23 +431,39 @@ static long udmabuf_create(struct miscdevice *device,
goto err;
}
+ /*
+ * Take the inode lock to protect against concurrent
+ * memfd_add_seals(), which takes this lock in write mode.
+ */
+ inode_lock_shared(file_inode(memfd));
ret = check_memfd_seals(memfd);
- if (ret < 0) {
- fput(memfd);
- goto err;
- }
+ if (ret)
+ goto out_unlock;
ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
list[i].size, folios);
+out_unlock:
+ inode_unlock_shared(file_inode(memfd));
fput(memfd);
if (ret)
goto err;
}
flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
- ret = export_udmabuf(ubuf, device, flags);
- if (ret < 0)
+ dmabuf = export_udmabuf(ubuf, device);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
goto err;
+ }
+ /*
+ * Ownership of ubuf is held by the dmabuf from here.
+ * If the following dma_buf_fd() fails, dma_buf_put() cleans up both the
+ * dmabuf and the ubuf (through udmabuf_ops.release).
+ */
+
+ ret = dma_buf_fd(dmabuf, flags);
+ if (ret < 0)
+ dma_buf_put(dmabuf);
kvfree(folios);
return ret;
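/*
 * A minimal userspace sketch of creating a udmabuf from a sealed memfd
 * (illustrative only, error handling omitted; size must be page aligned).
 * The memfd must carry F_SEAL_SHRINK and, with this change, must not carry
 * F_SEAL_WRITE or F_SEAL_FUTURE_WRITE. The name example_create_udmabuf is
 * hypothetical.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

static int example_create_udmabuf(size_t size)
{
	struct udmabuf_create create = { 0 };
	int memfd, devfd, buffd;

	memfd = memfd_create("example", MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);	/* required seal */

	devfd = open("/dev/udmabuf", O_RDWR);
	create.memfd = memfd;
	create.offset = 0;
	create.size = size;
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);	/* returns dma-buf fd */

	close(devfd);
	close(memfd);
	return buffd;
}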
diff --git a/drivers/dma/amd/qdma/qdma.c b/drivers/dma/amd/qdma/qdma.c
index 6d9079458fe9..66f00ad67351 100644
--- a/drivers/dma/amd/qdma/qdma.c
+++ b/drivers/dma/amd/qdma/qdma.c
@@ -7,9 +7,9 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/dma-map-ops.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_qdma.h>
#include <linux/regmap.h>
@@ -492,18 +492,9 @@ static int qdma_device_verify(struct qdma_device *qdev)
static int qdma_device_setup(struct qdma_device *qdev)
{
- struct device *dev = &qdev->pdev->dev;
u32 ring_sz = QDMA_DEFAULT_RING_SIZE;
int ret = 0;
- while (dev && get_dma_ops(dev))
- dev = dev->parent;
- if (!dev) {
- qdma_err(qdev, "dma device not found");
- return -EINVAL;
- }
- set_dma_ops(&qdev->pdev->dev, get_dma_ops(dev));
-
ret = qdma_setup_fmap_context(qdev);
if (ret) {
qdma_err(qdev, "Failed setup fmap context");
@@ -548,11 +539,12 @@ static void qdma_free_queue_resources(struct dma_chan *chan)
{
struct qdma_queue *queue = to_qdma_queue(chan);
struct qdma_device *qdev = queue->qdev;
- struct device *dev = qdev->dma_dev.dev;
+ struct qdma_platdata *pdata;
qdma_clear_queue_context(queue);
vchan_free_chan_resources(&queue->vchan);
- dma_free_coherent(dev, queue->ring_size * QDMA_MM_DESC_SIZE,
+ pdata = dev_get_platdata(&qdev->pdev->dev);
+ dma_free_coherent(pdata->dma_dev, queue->ring_size * QDMA_MM_DESC_SIZE,
queue->desc_base, queue->dma_desc_base);
}
@@ -565,6 +557,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
struct qdma_queue *queue = to_qdma_queue(chan);
struct qdma_device *qdev = queue->qdev;
struct qdma_ctxt_sw_desc desc;
+ struct qdma_platdata *pdata;
size_t size;
int ret;
@@ -572,8 +565,9 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
if (ret)
return ret;
+ pdata = dev_get_platdata(&qdev->pdev->dev);
size = queue->ring_size * QDMA_MM_DESC_SIZE;
- queue->desc_base = dma_alloc_coherent(qdev->dma_dev.dev, size,
+ queue->desc_base = dma_alloc_coherent(pdata->dma_dev, size,
&queue->dma_desc_base,
GFP_KERNEL);
if (!queue->desc_base) {
@@ -588,7 +582,7 @@ static int qdma_alloc_queue_resources(struct dma_chan *chan)
if (ret) {
qdma_err(qdev, "Failed to setup SW desc ctxt for %s",
chan->name);
- dma_free_coherent(qdev->dma_dev.dev, size, queue->desc_base,
+ dma_free_coherent(pdata->dma_dev, size, queue->desc_base,
queue->dma_desc_base);
return ret;
}
@@ -948,8 +942,9 @@ static int qdma_init_error_irq(struct qdma_device *qdev)
static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
{
- u32 ctxt[QDMA_CTXT_REGMAP_LEN];
+ struct qdma_platdata *pdata = dev_get_platdata(&qdev->pdev->dev);
struct device *dev = &qdev->pdev->dev;
+ u32 ctxt[QDMA_CTXT_REGMAP_LEN];
struct qdma_intr_ring *ring;
struct qdma_ctxt_intr intr_ctxt;
u32 vector;
@@ -969,7 +964,8 @@ static int qdmam_alloc_qintr_rings(struct qdma_device *qdev)
ring->msix_id = qdev->err_irq_idx + i + 1;
ring->ridx = i;
ring->color = 1;
- ring->base = dmam_alloc_coherent(dev, QDMA_INTR_RING_SIZE,
+ ring->base = dmam_alloc_coherent(pdata->dma_dev,
+ QDMA_INTR_RING_SIZE,
&ring->dev_base, GFP_KERNEL);
if (!ring->base) {
qdma_err(qdev, "Failed to alloc intr ring %d", i);
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index c499173d80b2..bd49f0374291 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -153,6 +153,8 @@ static int admac_alloc_sram_carveout(struct admac_data *ad,
{
struct admac_sram *sram;
int i, ret = 0, nblocks;
+ ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
+ ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
if (dir == DMA_MEM_TO_DEV)
sram = &ad->txcache;
@@ -912,12 +914,7 @@ static int admac_probe(struct platform_device *pdev)
goto free_irq;
}
- ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE);
- ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE);
-
dev_info(&pdev->dev, "Audio DMA Controller\n");
- dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n",
- readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size);
return 0;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 9c7b40220004..ba25c23164e7 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1363,6 +1363,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
return NULL;
desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
+ if (!desc)
+ return NULL;
list_add_tail(&desc->desc_node, &desc->descs_list);
desc->tx_dma_desc.cookie = -EBUSY;
diff --git a/drivers/dma/dw/acpi.c b/drivers/dma/dw/acpi.c
index c510c109d2c3..b6452fffa657 100644
--- a/drivers/dma/dw/acpi.c
+++ b/drivers/dma/dw/acpi.c
@@ -8,13 +8,15 @@
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
+ struct dw_dma *dw = to_dw_dma(chan->device);
+ struct dw_dma_chip_pdata *data = dev_get_drvdata(dw->dma.dev);
struct acpi_dma_spec *dma_spec = param;
struct dw_dma_slave slave = {
.dma_dev = dma_spec->dev,
.src_id = dma_spec->slave_id,
.dst_id = dma_spec->slave_id,
- .m_master = 0,
- .p_master = 1,
+ .m_master = data->m_master,
+ .p_master = data->p_master,
};
return dw_dma_filter(chan, &slave);
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h
index 563ce73488db..f1bd06a20cd6 100644
--- a/drivers/dma/dw/internal.h
+++ b/drivers/dma/dw/internal.h
@@ -51,11 +51,15 @@ struct dw_dma_chip_pdata {
int (*probe)(struct dw_dma_chip *chip);
int (*remove)(struct dw_dma_chip *chip);
struct dw_dma_chip *chip;
+ u8 m_master;
+ u8 p_master;
};
static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
.probe = dw_dma_probe,
.remove = dw_dma_remove,
+ .m_master = 0,
+ .p_master = 1,
};
static const struct dw_dma_platform_data idma32_pdata = {
@@ -72,6 +76,8 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
.pdata = &idma32_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
+ .m_master = 0,
+ .p_master = 0,
};
static const struct dw_dma_platform_data xbar_pdata = {
@@ -88,6 +94,8 @@ static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
.pdata = &xbar_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
+ .m_master = 0,
+ .p_master = 0,
};
#endif /* _DMA_DW_INTERNAL_H */
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index ad2d4d012cf7..e8a0eb81726a 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -56,10 +56,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
if (ret)
return ret;
- dw_dma_acpi_controller_register(chip->dw);
-
pci_set_drvdata(pdev, data);
+ dw_dma_acpi_controller_register(chip->dw);
+
return 0;
}
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index ce37e1ee9c46..fe8f103d4a63 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -166,6 +166,7 @@ struct fsl_edma_chan {
struct work_struct issue_worker;
struct platform_device *pdev;
struct device *pd_dev;
+ struct device_link *pd_dev_link;
u32 srcid;
struct clk *clk;
int priority;
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 60de1003193a..1a613236b3e4 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -417,10 +417,33 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+static void fsl_edma3_detach_pd(struct fsl_edma_engine *fsl_edma)
+{
+ struct fsl_edma_chan *fsl_chan;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+ fsl_chan = &fsl_edma->chans[i];
+ if (fsl_chan->pd_dev_link)
+ device_link_del(fsl_chan->pd_dev_link);
+ if (fsl_chan->pd_dev) {
+ dev_pm_domain_detach(fsl_chan->pd_dev, false);
+ pm_runtime_dont_use_autosuspend(fsl_chan->pd_dev);
+ pm_runtime_set_suspended(fsl_chan->pd_dev);
+ }
+ }
+}
+
+static void devm_fsl_edma3_detach_pd(void *data)
+{
+ fsl_edma3_detach_pd(data);
+}
+
static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
struct fsl_edma_chan *fsl_chan;
- struct device_link *link;
struct device *pd_chan;
struct device *dev;
int i;
@@ -436,15 +459,16 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
pd_chan = dev_pm_domain_attach_by_id(dev, i);
if (IS_ERR_OR_NULL(pd_chan)) {
dev_err(dev, "Failed attach pd %d\n", i);
- return -EINVAL;
+ goto detach;
}
- link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+ fsl_chan->pd_dev_link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
- if (!link) {
+ if (!fsl_chan->pd_dev_link) {
dev_err(dev, "Failed to add device_link to %d\n", i);
- return -EINVAL;
+ dev_pm_domain_detach(pd_chan, false);
+ goto detach;
}
fsl_chan->pd_dev = pd_chan;
@@ -455,6 +479,10 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
}
return 0;
+
+detach:
+ fsl_edma3_detach_pd(fsl_edma);
+ return -EINVAL;
}
static int fsl_edma_probe(struct platform_device *pdev)
@@ -544,6 +572,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
ret = fsl_edma3_attach_pd(pdev, fsl_edma);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma);
+ if (ret)
+ return ret;
}
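/*
 * Sketch of the devm_add_action_or_reset() pattern used above, with
 * hypothetical example_* names: the registered action runs automatically
 * on any later probe failure and on unbind, or runs immediately (and an
 * error is returned) if the registration itself fails, so no manual
 * unwind path is needed in the caller.
 */
#include <linux/device.h>

static void example_teardown(void *data)
{
	/* undo whatever the preceding setup step did */
}

static int example_setup(struct device *dev, void *ctx)
{
	return devm_add_action_or_reset(dev, example_teardown, ctx);
}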
if (drvdata->flags & FSL_EDMA_DRV_TCD64)
diff --git a/drivers/dma/loongson2-apb-dma.c b/drivers/dma/loongson2-apb-dma.c
index 367ed34ce4da..c528f02b9f84 100644
--- a/drivers/dma/loongson2-apb-dma.c
+++ b/drivers/dma/loongson2-apb-dma.c
@@ -31,7 +31,7 @@
#define LDMA_ASK_VALID BIT(2)
#define LDMA_START BIT(3) /* DMA start operation */
#define LDMA_STOP BIT(4) /* DMA stop operation */
-#define LDMA_CONFIG_MASK GENMASK(4, 0) /* DMA controller config bits mask */
+#define LDMA_CONFIG_MASK GENMASK_ULL(4, 0) /* DMA controller config bits mask */
/* Bitfields in ndesc_addr field of HW descriptor */
#define LDMA_DESC_EN BIT(0) /*1: The next descriptor is valid */
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 43efce77bb57..40b76b40bc30 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1388,6 +1388,7 @@ static int mv_xor_probe(struct platform_device *pdev)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -ENODEV;
+ of_node_put(np);
goto err_channel_add;
}
@@ -1396,6 +1397,7 @@ static int mv_xor_probe(struct platform_device *pdev)
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
+ of_node_put(np);
goto err_channel_add;
}
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index cacf3757adc2..4d6fe0efa76e 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -231,6 +231,7 @@ struct tegra_dma_channel {
bool config_init;
char name[30];
enum dma_transfer_direction sid_dir;
+ enum dma_status status;
int id;
int irq;
int slave_id;
@@ -393,6 +394,8 @@ static int tegra_dma_pause(struct tegra_dma_channel *tdc)
tegra_dma_dump_chan_regs(tdc);
}
+ tdc->status = DMA_PAUSED;
+
return ret;
}
@@ -419,6 +422,8 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc)
val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE);
val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE;
tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val);
+
+ tdc->status = DMA_IN_PROGRESS;
}
static int tegra_dma_device_resume(struct dma_chan *dc)
@@ -544,6 +549,7 @@ static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc)
tegra_dma_sid_free(tdc);
tdc->dma_desc = NULL;
+ tdc->status = DMA_COMPLETE;
}
static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc,
@@ -716,6 +722,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
tdc->dma_desc = NULL;
}
+ tdc->status = DMA_COMPLETE;
tegra_dma_sid_free(tdc);
vchan_get_all_descriptors(&tdc->vc, &head);
spin_unlock_irqrestore(&tdc->vc.lock, flags);
@@ -769,6 +776,9 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
if (ret == DMA_COMPLETE)
return ret;
+ if (tdc->status == DMA_PAUSED)
+ ret = DMA_PAUSED;
+
spin_lock_irqsave(&tdc->vc.lock, flags);
vd = vchan_find_desc(&tdc->vc, cookie);
if (vd) {
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index eb17d03b66fe..dfda5ffc14db 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -187,13 +187,18 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
return valid;
}
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
- const struct ffa_ops *ops)
+struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops)
{
int id, ret;
+ uuid_t uuid;
struct device *dev;
struct ffa_device *ffa_dev;
+ if (!part_info)
+ return NULL;
+
id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
if (id < 0)
return NULL;
@@ -210,9 +215,11 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;
- ffa_dev->vm_id = vm_id;
+ ffa_dev->vm_id = part_info->id;
+ ffa_dev->properties = part_info->properties;
ffa_dev->ops = ops;
- uuid_copy(&ffa_dev->uuid, uuid);
+ import_uuid(&uuid, (u8 *)part_info->uuid);
+ uuid_copy(&ffa_dev->uuid, &uuid);
ret = device_register(&ffa_dev->dev);
if (ret) {
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index b14cbdae94e8..2c2ec3c35f15 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -1387,7 +1387,6 @@ static struct notifier_block ffa_bus_nb = {
static int ffa_setup_partitions(void)
{
int count, idx, ret;
- uuid_t uuid;
struct ffa_device *ffa_dev;
struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf;
@@ -1406,23 +1405,19 @@ static int ffa_setup_partitions(void)
xa_init(&drv_info->partition_info);
for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
- import_uuid(&uuid, (u8 *)tpbuf->uuid);
-
/* Note that if the UUID will be uuid_null, that will require
* ffa_bus_notifier() to find the UUID of this partition id
* with help of ffa_device_match_uuid(). FF-A v1.1 and above
* provides UUID here for each partition as part of the
* discovery API and the same is passed.
*/
- ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
+ ffa_dev = ffa_device_register(tpbuf, &ffa_drv_ops);
if (!ffa_dev) {
pr_err("%s: failed to register partition ID 0x%x\n",
__func__, tpbuf->id);
continue;
}
- ffa_dev->properties = tpbuf->properties;
-
if (drv_info->version > FFA_VERSION_1_0 &&
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
diff --git a/drivers/firmware/arm_scmi/vendors/imx/Kconfig b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
index 2883ed24a84d..a01bf5e47301 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/Kconfig
+++ b/drivers/firmware/arm_scmi/vendors/imx/Kconfig
@@ -15,6 +15,7 @@ config IMX_SCMI_BBM_EXT
config IMX_SCMI_MISC_EXT
tristate "i.MX SCMI MISC EXTENSION"
depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+ depends on IMX_SCMI_MISC_DRV
default y if ARCH_MXC
help
This enables i.MX System MISC control logic such as gpio expander
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
index 3ccbe14e4b0c..ee09269c63b5 100644
--- a/drivers/firmware/cirrus/Kconfig
+++ b/drivers/firmware/cirrus/Kconfig
@@ -3,3 +3,23 @@
config FW_CS_DSP
tristate
default n
+
+config FW_CS_DSP_KUNIT_TEST_UTILS
+ tristate
+ depends on KUNIT
+ select REGMAP
+ select FW_CS_DSP
+
+config FW_CS_DSP_KUNIT_TEST
+ tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select REGMAP
+ select FW_CS_DSP
+ select FW_CS_DSP_KUNIT_TEST_UTILS
+ help
+ This builds KUnit tests for cs_dsp.
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+ If in doubt, say "N".
diff --git a/drivers/firmware/cirrus/Makefile b/drivers/firmware/cirrus/Makefile
index b91318ca0ff4..b32dfa869491 100644
--- a/drivers/firmware/cirrus/Makefile
+++ b/drivers/firmware/cirrus/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
#
obj-$(CONFIG_FW_CS_DSP) += cs_dsp.o
+
+obj-y += test/
diff --git a/drivers/firmware/cirrus/test/Makefile b/drivers/firmware/cirrus/test/Makefile
new file mode 100644
index 000000000000..7a24a6079ddc
--- /dev/null
+++ b/drivers/firmware/cirrus/test/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+
+cs_dsp_test_utils-objs := \
+ cs_dsp_mock_mem_maps.o \
+ cs_dsp_mock_bin.o \
+ cs_dsp_mock_regmap.o \
+ cs_dsp_mock_utils.o \
+ cs_dsp_mock_wmfw.o
+
+cs_dsp_test-objs := \
+ cs_dsp_test_bin.o \
+ cs_dsp_test_bin_error.o \
+ cs_dsp_test_callbacks.o \
+ cs_dsp_test_control_parse.o \
+ cs_dsp_test_control_cache.o \
+ cs_dsp_test_control_rw.o \
+ cs_dsp_test_wmfw.o \
+ cs_dsp_test_wmfw_error.o \
+ cs_dsp_tests.o
+
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST_UTILS) += cs_dsp_test_utils.o
+obj-$(CONFIG_FW_CS_DSP_KUNIT_TEST) += cs_dsp_test.o
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
new file mode 100644
index 000000000000..49d84f7e59e6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_bin.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// bin file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for bin file content */
+#define CS_DSP_MOCK_BIN_BUF_SIZE 32768
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+struct cs_dsp_mock_bin_builder {
+ struct cs_dsp_test *test_priv;
+ void *buf;
+ void *write_p;
+ size_t bytes_used;
+};
+
+/**
+ * cs_dsp_mock_bin_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder)
+{
+ struct firmware *fw;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_raw_block() - Add a data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID.
+ * @alg_ver: Algorithm version.
+ * @type: Type of the block.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_coeff_item *item;
+ size_t bytes_needed = struct_size_t(struct wmfw_coeff_item, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_BIN_BUF_SIZE));
+
+ item = builder->write_p;
+
+ item->offset = cpu_to_le16(offset);
+ item->type = cpu_to_le16(type);
+ item->id = cpu_to_le32(alg_id);
+ item->ver = cpu_to_le32(alg_ver << 8);
+ item->len = cpu_to_le32(payload_len_bytes);
+
+ if (payload_len_bytes)
+ memcpy(item->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_bin_add_name_or_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info, int type)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a padded string with length a multiple of 4 */
+ info_len = round_up(info_len, 4);
+ tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+ memcpy(tmp, info, info_len);
+ info = tmp;
+ }
+
+	cs_dsp_mock_bin_add_raw_block(builder, 0, 0, type, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+
+/**
+ * cs_dsp_mock_bin_add_info() - Add an info block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, info, WMFW_INFO_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_name() - Add a name block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @name: Pointer to name string to be copied into the file.
+ */
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name)
+{
+ cs_dsp_mock_bin_add_name_or_info(builder, name, WMFW_NAME_TEXT);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_name, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_add_patch() - Add a patch data block to the bin file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_bin_builder.
+ * @alg_id: Algorithm ID for the patch.
+ * @alg_ver: Algorithm version for the patch.
+ * @mem_region: Memory region for the patch.
+ * @reg_addr_offset: Offset to start of data in register addresses.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_bin_add_raw_block(builder, alg_id, alg_ver,
+ mem_region, reg_addr_offset,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_add_patch, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_bin_init() - Initialize a struct cs_dsp_mock_bin_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required bin format version.
+ * @fw_version: Firmware version to put in bin file.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_bin_builder.
+ */
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version)
+{
+ struct cs_dsp_mock_bin_builder *builder;
+ struct wmfw_coeff_hdr *hdr;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_BIN_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ /* Create header */
+ hdr = builder->buf;
+ memcpy(hdr->magic, "WMDR", sizeof(hdr->magic));
+ hdr->len = cpu_to_le32(offsetof(struct wmfw_coeff_hdr, data));
+ hdr->ver = cpu_to_le32(fw_version | (format_version << 24));
+ hdr->core_ver = cpu_to_le32(((u32)priv->dsp->type << 24) | priv->dsp->rev);
+
+ builder->write_p = hdr->data;
+ builder->bytes_used = offsetof(struct wmfw_coeff_hdr, data);
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_bin_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
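/*
 * A minimal sketch of how a test case might use the builder (assumed
 * usage; the algorithm ID, version and payload are arbitrary):
 */
static void example_bin_usage(struct cs_dsp_test *priv)
{
	static const u8 payload[4] = { 1, 2, 3, 4 };
	struct cs_dsp_mock_bin_builder *builder;
	struct firmware *fw;

	builder = cs_dsp_mock_bin_init(priv, 1, 0x090101);
	cs_dsp_mock_bin_add_name(builder, "example");
	cs_dsp_mock_bin_add_patch(builder, 0xfafa, 0x100, WMFW_ADSP2_YM, 0,
				  payload, sizeof(payload));
	fw = cs_dsp_mock_bin_get_firmware(builder);
	/* fw can now be handed to the cs_dsp core as the coefficient file */
}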
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
new file mode 100644
index 000000000000..161272e47bda
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_mem_maps.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock DSP memory maps for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/math.h>
+
+const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[] = {
+ { .type = WMFW_HALO_PM_PACKED, .base = 0x3800000 },
+ { .type = WMFW_HALO_XM_PACKED, .base = 0x2000000 },
+ { .type = WMFW_HALO_YM_PACKED, .base = 0x2C00000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x2800000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x3400000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[] = {
+ 0x5000, /* PM_PACKED */
+ 0x6000, /* XM_PACKED */
+ 0x47F4, /* YM_PACKED */
+ 0x8000, /* XM_UNPACKED_24 */
+ 0x5FF8, /* YM_UNPACKED_24 */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x080000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x0a0000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x0c0000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x0e0000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[] = {
+ 0x9000, /* PM */
+ 0xa000, /* ZM */
+ 0x2000, /* XM */
+ 0x2000, /* YM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[] = {
+ { .type = WMFW_ADSP2_PM, .base = 0x100000 },
+ { .type = WMFW_ADSP2_ZM, .base = 0x180000 },
+ { .type = WMFW_ADSP2_XM, .base = 0x190000 },
+ { .type = WMFW_ADSP2_YM, .base = 0x1a8000 },
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/* List of sizes in bytes, for each entry above */
+const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[] = {
+ 0x6000, /* PM */
+ 0x800, /* ZM */
+ 0x800, /* XM */
+ 0x800, /* YM */
+
+ 0 /* terminator */
+};
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes)
+{
+ int i;
+
+ for (i = 0; region_sizes[i]; ++i)
+ ;
+
+ return i;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_count_regions, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_size_of_region() - Return size of given memory region.
+ *
+ * @dsp: Pointer to struct cs_dsp.
+ * @mem_type: Memory region type.
+ *
+ * Return: Size of region in bytes.
+ */
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type)
+{
+ const unsigned int *sizes;
+ int i;
+
+ if (dsp->mem == cs_dsp_mock_halo_dsp1_regions)
+ sizes = cs_dsp_mock_halo_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_32bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_32bit_dsp1_region_sizes;
+ else if (dsp->mem == cs_dsp_mock_adsp2_16bit_dsp1_regions)
+ sizes = cs_dsp_mock_adsp2_16bit_dsp1_region_sizes;
+ else
+ return 0;
+
+ for (i = 0; i < dsp->num_mems; ++i) {
+ if (dsp->mem[i].type == mem_type)
+ return sizes[i];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_size_of_region, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_base_addr_for_mem() - Base register address for memory region.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Base register address of region.
+ */
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type)
+{
+ int num_mems = priv->dsp->num_mems;
+ const struct cs_dsp_region *region = priv->dsp->mem;
+ int i;
+
+ for (i = 0; i < num_mems; ++i) {
+ if (region[i].type == mem_type)
+ return region[i].base;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected region %d\n", mem_type);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_base_addr_for_mem, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_addr_inc_per_unpacked_word() - Unpacked register address increment per DSP word.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Amount by which register address increments to move to the next
+ * DSP word in unpacked XM/YM/ZM.
+ */
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return 2; /* two 16-bit register indexes per XM/YM/ZM word */
+ case WMFW_HALO:
+ return 4; /* one byte-addressed 32-bit register per XM/YM/ZM word */
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_addr_inc_per_unpacked_word, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_bytes() - Number of bytes in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of bytes in a group of registers forming the
+ * smallest bus access size (including any padding bits). For unpacked
+ * memory this is the number of registers containing one DSP word.
+ * For packed memory this is the number of registers in one packed
+ * access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return 3 * regmap_get_val_bytes(priv->dsp->regmap);
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return sizeof(u32);
+ case WMFW_HALO_PM_PACKED:
+ return 5 * sizeof(u32);
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 3 * sizeof(u32);
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_registers() - Number of registers in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of registers forming the smallest bus access size.
+ * For unpacked memory this is the number of registers containing one
+ * DSP word. For packed memory this is the number of registers in one
+ * packed access block.
+ */
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type)
+{
+ return cs_dsp_mock_reg_block_length_bytes(priv, mem_type) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_registers, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_reg_block_length_dsp_words() - Number of dsp_words in an access block.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @mem_type: Memory region type.
+ *
+ * Return: Total number of DSP words in a group of registers forming the
+ * smallest bus access size.
+ */
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ switch (mem_type) {
+ case WMFW_ADSP2_PM:
+ return regmap_get_val_bytes(priv->dsp->regmap) / 2;
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP2_ZM:
+ return 1;
+ default:
+ break;
+ }
+ break;
+ case WMFW_HALO:
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return 1;
+ case WMFW_HALO_PM_PACKED:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return 4;
+ default:
+ break;
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type\n");
+ return 0;
+ }
+
+ KUNIT_FAIL(priv->test, "Unexpected mem type\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_reg_block_length_dsp_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_has_zm() - DSP has ZM
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: True if DSP has ZM.
+ */
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_has_zm, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_packed_to_unpacked_mem_type() - Unpacked region that is
+ * the same memory as a packed region.
+ *
+ * @packed_mem_type: Type of packed memory region.
+ *
+ * Return: unpacked type that is the same memory as packed_mem_type.
+ */
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type)
+{
+ switch (packed_mem_type) {
+ case WMFW_HALO_XM_PACKED:
+ return WMFW_ADSP2_XM;
+ case WMFW_HALO_YM_PACKED:
+ return WMFW_ADSP2_YM;
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_packed_to_unpacked_mem_type, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_num_dsp_words_to_num_packed_regs() - Number of DSP words
+ * to number of packed registers.
+ *
+ * @num_dsp_words: Number of DSP words.
+ *
+ * Convert number of DSP words to number of packed registers rounded
+ * down to the nearest register.
+ *
+ * Return: Number of packed registers.
+ */
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ /* There are 3 registers for every 4 packed words */
+ return (num_dsp_words * 3) / 4;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_num_dsp_words_to_num_packed_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
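/*
 * Worked example: four packed 24-bit DSP words occupy exactly three
 * 32-bit registers, so 8 DSP words map to (8 * 3) / 4 = 6 registers and
 * 5 DSP words round down to (5 * 3) / 4 = 3 registers.
 */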
+
+static const struct wmfw_halo_id_hdr cs_dsp_mock_halo_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_HALO << 16),
+ .block_rev = cpu_to_be32(3 << 16),
+ .vendor_id = cpu_to_be32(0x2),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm_base = cpu_to_be32(((sizeof(struct wmfw_halo_id_hdr) +
+ (40 * sizeof(struct wmfw_halo_alg_hdr)))
+ / 4) * 3),
+ .xm_size = cpu_to_be32(0x20),
+
+ /* Allocate a dummy word of YM */
+ .ym_base = cpu_to_be32(0),
+ .ym_size = cpu_to_be32(1),
+
+ .n_algs = 0,
+};
+
+static const struct wmfw_adsp2_id_hdr cs_dsp_mock_adsp2_xm_hdr = {
+ .fw = {
+ .core_id = cpu_to_be32(WMFW_ADSP2 << 16),
+ .core_rev = cpu_to_be32(2 << 16),
+ .id = cpu_to_be32(0xabcdef),
+ .ver = cpu_to_be32(0x090101),
+ },
+
+ /*
+ * Leave enough space for this header and 40 algorithm descriptors.
+ * base and size are counted in DSP words.
+ */
+ .xm = cpu_to_be32(((sizeof(struct wmfw_adsp2_id_hdr) +
+ (40 * sizeof(struct wmfw_adsp2_alg_hdr)))
+ / 4) * 3),
+
+ .ym = cpu_to_be32(0),
+ .zm = cpu_to_be32(0),
+
+ .n_algs = 0,
+};
+
+/**
+ * cs_dsp_mock_xm_header_get_alg_base_in_words() - Algorithm base offset in DSP words.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @alg_id: Algorithm ID.
+ * @mem_type: Memory region type.
+ *
+ * Lookup an algorithm in the XM header and return the base offset in
+ * DSP words of the algorithm data in the requested memory region.
+ *
+ * Return: Offset in DSP words.
+ */
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_adsp2_alg_hdr adsp2;
+ struct wmfw_halo_alg_hdr halo;
+ } alg;
+ unsigned int alg_hdr_addr;
+ unsigned int val, xm_base = 0, ym_base = 0, zm_base = 0;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ alg_hdr_addr = xm + (sizeof(struct wmfw_adsp2_id_hdr) / 2);
+ for (;; alg_hdr_addr += sizeof(alg.adsp2) / 2) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.adsp2, sizeof(alg.adsp2));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.adsp2.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.adsp2.xm);
+ ym_base = be32_to_cpu(alg.adsp2.ym);
+ zm_base = be32_to_cpu(alg.adsp2.zm);
+ break;
+ }
+ }
+ break;
+ case WMFW_HALO:
+ alg_hdr_addr = xm + sizeof(struct wmfw_halo_id_hdr);
+ for (;; alg_hdr_addr += sizeof(alg.halo)) {
+ ret = regmap_read(priv->dsp->regmap, alg_hdr_addr, &val);
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ KUNIT_ASSERT_NE(priv->test, val, 0xbedead);
+ ret = regmap_raw_read(priv->dsp->regmap, alg_hdr_addr,
+ &alg.halo, sizeof(alg.halo));
+ KUNIT_ASSERT_GE(priv->test, ret, 0);
+ if (be32_to_cpu(alg.halo.alg.id) == alg_id) {
+ xm_base = be32_to_cpu(alg.halo.xm_base);
+ ym_base = be32_to_cpu(alg.halo.ym_base);
+ break;
+ }
+ }
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "Unexpected DSP type %d\n", priv->dsp->type);
+ return 0;
+ }
+
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_HALO_XM_PACKED:
+ return xm_base;
+ case WMFW_ADSP2_YM:
+ case WMFW_HALO_YM_PACKED:
+ return ym_base;
+ case WMFW_ADSP2_ZM:
+ return zm_base;
+ default:
+ KUNIT_FAIL(priv->test, "Bad mem_type\n");
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_alg_base_in_words, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version_from_regmap() - Firmware version.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ union {
+ struct wmfw_id_hdr adsp2;
+ struct wmfw_v3_id_hdr halo;
+ } hdr;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ regmap_raw_read(priv->dsp->regmap, xm, &hdr.adsp2, sizeof(hdr.adsp2));
+ return be32_to_cpu(hdr.adsp2.ver);
+ case WMFW_HALO:
+ regmap_raw_read(priv->dsp->regmap, xm, &hdr.halo, sizeof(hdr.halo));
+ return be32_to_cpu(hdr.halo.ver);
+ default:
+ KUNIT_FAIL(priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version_from_regmap,
+ "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_get_fw_version() - Firmware version.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * Return: Firmware version word value.
+ */
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header)
+{
+ const struct wmfw_id_hdr *adsp2_hdr;
+ const struct wmfw_v3_id_hdr *halo_hdr;
+
+ switch (header->test_priv->dsp->type) {
+ case WMFW_ADSP2:
+ adsp2_hdr = header->blob_data;
+ return be32_to_cpu(adsp2_hdr->ver);
+ case WMFW_HALO:
+ halo_hdr = header->blob_data;
+ return be32_to_cpu(halo_hdr->ver);
+ default:
+ KUNIT_FAIL(header->test_priv->test, NULL);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_get_fw_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_xm_header_drop_from_regmap_cache() - Drop XM header from regmap cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ */
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv)
+{
+ unsigned int xm = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ unsigned int bytes;
+ __be32 num_algs_be32;
+ unsigned int num_algs;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ /*
+ * Could be one 32-bit register or two 16-bit registers.
+ * A raw read will read the requested number of bytes.
+ */
+ regmap_raw_read(priv->dsp->regmap,
+ xm + (offsetof(struct wmfw_adsp2_id_hdr, n_algs) / 2),
+ &num_algs_be32, sizeof(num_algs_be32));
+ num_algs = be32_to_cpu(num_algs_be32);
+ bytes = sizeof(struct wmfw_adsp2_id_hdr) +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + (bytes / 2) - 1);
+ break;
+ case WMFW_HALO:
+ regmap_read(priv->dsp->regmap,
+ xm + offsetof(struct wmfw_halo_id_hdr, n_algs),
+ &num_algs);
+ bytes = sizeof(struct wmfw_halo_id_hdr) +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr)) +
+ 4 /* terminator word */;
+
+ regcache_drop_region(priv->dsp->regmap, xm, xm + bytes - 4);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_drop_from_regmap_cache, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_mock_xm_header_add_adsp2_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_adsp2_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word, next_free_zm_word;
+
+ next_free_xm_word = be32_to_cpu(hdr->xm);
+ next_free_ym_word = be32_to_cpu(hdr->ym);
+ next_free_zm_word = be32_to_cpu(hdr->zm);
+
+ /* Set num_algs in XM header. */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_adsp2_alg_hdr *alg_info =
+ (struct wmfw_adsp2_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last, alg_zm_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm = cpu_to_be32(algs->xm_base_words);
+ alg_info->ym = cpu_to_be32(algs->ym_base_words);
+ alg_info->zm = cpu_to_be32(algs->zm_base_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm && algs->xm_size_words)
+ alg_info->xm = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym && algs->ym_size_words)
+ alg_info->ym = cpu_to_be32(next_free_ym_word);
+
+ if (!alg_info->zm && algs->zm_size_words)
+ alg_info->zm = cpu_to_be32(next_free_zm_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm) + algs->xm_size_words - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym) + algs->ym_size_words - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+
+ alg_zm_last = be32_to_cpu(alg_info->zm) + algs->zm_size_words - 1;
+ if (alg_zm_last > next_free_zm_word)
+ next_free_zm_word = alg_zm_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+static void cs_dsp_mock_xm_header_add_halo_algs(struct cs_dsp_mock_xm_header *builder,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct wmfw_halo_id_hdr *hdr = builder->blob_data;
+ unsigned int next_free_xm_word, next_free_ym_word;
+
+ /* Assume we're starting with bare header */
+ next_free_xm_word = be32_to_cpu(hdr->xm_base) + be32_to_cpu(hdr->xm_size) - 1;
+ next_free_ym_word = be32_to_cpu(hdr->ym_base) + be32_to_cpu(hdr->ym_size) - 1;
+
+ /* Set num_algs in XM header */
+ hdr->n_algs = cpu_to_be32(num_algs);
+
+ /* Create algorithm descriptor list */
+ struct wmfw_halo_alg_hdr *alg_info =
+ (struct wmfw_halo_alg_hdr *)(&hdr[1]);
+
+ for (; num_algs > 0; num_algs--, algs++, alg_info++) {
+ unsigned int alg_xm_last, alg_ym_last;
+
+ alg_info->alg.id = cpu_to_be32(algs->id);
+ alg_info->alg.ver = cpu_to_be32(algs->ver);
+ alg_info->xm_base = cpu_to_be32(algs->xm_base_words);
+ alg_info->xm_size = cpu_to_be32(algs->xm_size_words);
+ alg_info->ym_base = cpu_to_be32(algs->ym_base_words);
+ alg_info->ym_size = cpu_to_be32(algs->ym_size_words);
+
+ /* Check if we need to auto-allocate base addresses */
+ if (!alg_info->xm_base && alg_info->xm_size)
+ alg_info->xm_base = cpu_to_be32(next_free_xm_word);
+
+ if (!alg_info->ym_base && alg_info->ym_size)
+ alg_info->ym_base = cpu_to_be32(next_free_ym_word);
+
+ alg_xm_last = be32_to_cpu(alg_info->xm_base) + be32_to_cpu(alg_info->xm_size) - 1;
+ if (alg_xm_last > next_free_xm_word)
+ next_free_xm_word = alg_xm_last;
+
+ alg_ym_last = be32_to_cpu(alg_info->ym_base) + be32_to_cpu(alg_info->ym_size) - 1;
+ if (alg_ym_last > next_free_ym_word)
+ next_free_ym_word = alg_ym_last;
+ }
+
+ /* Write list terminator */
+ *(__be32 *)(alg_info) = cpu_to_be32(0xbedead);
+}
+
+/**
+ * cs_dsp_mock_xm_header_write_to_regmap() - Write XM header to regmap.
+ *
+ * @header: Pointer to struct cs_dsp_mock_xm_header.
+ *
+ * The data in header is written to the XM addresses in the regmap.
+ *
+ * Return: 0 on success, else negative error code.
+ */
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header)
+{
+ struct cs_dsp_test *priv = header->test_priv;
+ unsigned int reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+
+ /*
+ * One 32-bit word corresponds to one 32-bit unpacked XM word so the
+ * blob can be written directly to the regmap.
+ */
+ return regmap_raw_write(priv->dsp->regmap, reg_addr,
+ header->blob_data, header->blob_size_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_xm_header_write_to_regmap, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_create_mock_xm_header() - Create a dummy XM header.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @algs: Pointer to array of struct cs_dsp_mock_alg_def listing the
+ * dummy algorithm entries to include in the XM header.
+ * @num_algs: Number of entries in the algs array.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_xm_header.
+ */
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs)
+{
+ struct cs_dsp_mock_xm_header *builder;
+ size_t total_bytes_required;
+ const void *header;
+ size_t header_size_bytes;
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+ builder->test_priv = priv;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ header = &cs_dsp_mock_adsp2_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_adsp2_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_adsp2_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ case WMFW_HALO:
+		header = &cs_dsp_mock_halo_xm_hdr;
+ header_size_bytes = sizeof(cs_dsp_mock_halo_xm_hdr);
+ total_bytes_required = header_size_bytes +
+ (num_algs * sizeof(struct wmfw_halo_alg_hdr))
+ + 4; /* terminator word */
+ break;
+ default:
+ KUNIT_FAIL(priv->test, "%s unexpected DSP type %d\n",
+ __func__, priv->dsp->type);
+ return NULL;
+ }
+
+ builder->blob_data = kunit_kzalloc(priv->test, total_bytes_required, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder->blob_data);
+ builder->blob_size_bytes = total_bytes_required;
+
+ memcpy(builder->blob_data, header, header_size_bytes);
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ cs_dsp_mock_xm_header_add_adsp2_algs(builder, algs, num_algs);
+ break;
+ case WMFW_HALO:
+ cs_dsp_mock_xm_header_add_halo_algs(builder, algs, num_algs);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_create_mock_xm_header, "FW_CS_DSP_KUNIT_TEST_UTILS");
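/*
 * A minimal sketch of how a test case might publish a dummy XM header to
 * the mock regmap (assumed usage; the algorithm definition is arbitrary):
 */
static void example_xm_header_usage(struct cs_dsp_test *priv)
{
	static const struct cs_dsp_mock_alg_def algs[] = {
		{ .id = 0xfafa, .ver = 0x100,
		  .xm_size_words = 16, .ym_size_words = 16 },
	};
	struct cs_dsp_mock_xm_header *hdr;

	hdr = cs_dsp_create_mock_xm_header(priv, algs, ARRAY_SIZE(algs));
	KUNIT_ASSERT_EQ(priv->test,
			cs_dsp_mock_xm_header_write_to_regmap(hdr), 0);
}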
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
new file mode 100644
index 000000000000..fb8e4a5d189a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_regmap.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Mock regmap for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/regmap.h>
+
+static int cs_dsp_mock_regmap_read(void *context, const void *reg_buf,
+ const size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus read @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_gather_write(void *context,
+ const void *reg_buf, size_t reg_size,
+ const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus gather_write @%#x", *(u32 *)reg_buf);
+
+ return -EIO;
+}
+
+static int cs_dsp_mock_regmap_write(void *context, const void *val_buf, size_t val_size)
+{
+ struct cs_dsp_test *priv = context;
+
+ priv->saw_bus_write = true;
+
+ /* Should never get here because the regmap is cache-only */
+ KUNIT_FAIL(priv->test, "Unexpected bus write @%#x", *(u32 *)val_buf);
+
+ return -EIO;
+}
+
+static const struct regmap_bus cs_dsp_mock_regmap_bus = {
+ .read = cs_dsp_mock_regmap_read,
+ .write = cs_dsp_mock_regmap_write,
+ .gather_write = cs_dsp_mock_regmap_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct reg_default adsp2_32bit_register_defaults[] = {
+ { 0xffe00, 0x0000 }, /* CONTROL */
+ { 0xffe02, 0x0000 }, /* CLOCKING */
+ { 0xffe04, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+	{ 0xffe30, 0x0000 }, /* WDMA_CONFIG_1 */
+ { 0xffe32, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0xffe34, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0xffe40, 0x0000 }, /* SCRATCH_0_1 */
+ { 0xffe42, 0x0000 }, /* SCRATCH_2_3 */
+};
+
+static const struct regmap_range adsp2_32bit_registers[] = {
+ regmap_reg_range(0x80000, 0x88ffe), /* PM */
+ regmap_reg_range(0xa0000, 0xa9ffe), /* XM */
+ regmap_reg_range(0xc0000, 0xc1ffe), /* YM */
+ regmap_reg_range(0xe0000, 0xe1ffe), /* ZM */
+ regmap_reg_range(0xffe00, 0xffe7c), /* CORE CTRL */
+};
+
+const unsigned int cs_dsp_mock_adsp2_32bit_sysbase = 0xffe00;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_32bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_32bit_rw = {
+ .yes_ranges = adsp2_32bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_32bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_32bit = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 2,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_32bit_rw,
+ .rd_table = &adsp2_32bit_rw,
+ .max_register = 0xffe7c,
+ .reg_defaults = adsp2_32bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_32bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default adsp2_16bit_register_defaults[] = {
+ { 0x1100, 0x0000 }, /* CONTROL */
+ { 0x1101, 0x0000 }, /* CLOCKING */
+ { 0x1104, 0x0001 }, /* STATUS1: RAM_RDY=1 */
+ { 0x1130, 0x0000 }, /* WDMA_CONFIG_1 */
+ { 0x1131, 0x0000 }, /* WDMA_CONFIG_2 */
+ { 0x1134, 0x0000 }, /* RDMA_CONFIG_1 */
+ { 0x1140, 0x0000 }, /* SCRATCH_0 */
+ { 0x1141, 0x0000 }, /* SCRATCH_1 */
+ { 0x1142, 0x0000 }, /* SCRATCH_2 */
+ { 0x1143, 0x0000 }, /* SCRATCH_3 */
+};
+
+static const struct regmap_range adsp2_16bit_registers[] = {
+ regmap_reg_range(0x001100, 0x001143), /* CORE CTRL */
+ regmap_reg_range(0x100000, 0x105fff), /* PM */
+ regmap_reg_range(0x180000, 0x1807ff), /* ZM */
+ regmap_reg_range(0x190000, 0x1947ff), /* XM */
+ regmap_reg_range(0x1a8000, 0x1a97ff), /* YM */
+};
+
+const unsigned int cs_dsp_mock_adsp2_16bit_sysbase = 0x001100;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_adsp2_16bit_sysbase, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table adsp2_16bit_rw = {
+ .yes_ranges = adsp2_16bit_registers,
+ .n_yes_ranges = ARRAY_SIZE(adsp2_16bit_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_adsp2_16bit = {
+ .reg_bits = 32,
+ .val_bits = 16,
+ .reg_stride = 1,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &adsp2_16bit_rw,
+ .rd_table = &adsp2_16bit_rw,
+ .max_register = 0x1a97ff,
+ .reg_defaults = adsp2_16bit_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(adsp2_16bit_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static const struct reg_default halo_register_defaults[] = {
+ /* CORE */
+ { 0x2b80010, 0 }, /* HALO_CORE_SOFT_RESET */
+ { 0x2b805c0, 0 }, /* HALO_SCRATCH1 */
+ { 0x2b805c8, 0 }, /* HALO_SCRATCH2 */
+ { 0x2b805d0, 0 }, /* HALO_SCRATCH3 */
+ { 0x2b805d8, 0 }, /* HALO_SCRATCH4 */
+ { 0x2bc1000, 0 }, /* HALO_CCM_CORE_CONTROL */
+ { 0x2bc7000, 0 }, /* HALO_WDT_CONTROL */
+
+ /* SYSINFO */
+ { 0x25e2040, 0 }, /* HALO_AHBM_WINDOW_DEBUG_0 */
+ { 0x25e2044, 0 }, /* HALO_AHBM_WINDOW_DEBUG_1 */
+};
+
+static const struct regmap_range halo_readable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x25e0000, 0x25e004f), /* SYSINFO */
+ regmap_reg_range(0x25e2000, 0x25e2047), /* SYSINFO */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+static const struct regmap_range halo_writeable_registers[] = {
+ regmap_reg_range(0x2000000, 0x2005fff), /* XM_PACKED */
+ regmap_reg_range(0x2800000, 0x2807fff), /* XM */
+ regmap_reg_range(0x2b80000, 0x2bc700b), /* CORE CTRL */
+ regmap_reg_range(0x2c00000, 0x2c047f3), /* YM_PACKED */
+ regmap_reg_range(0x3400000, 0x3405ff7), /* YM */
+ regmap_reg_range(0x3800000, 0x3804fff), /* PM_PACKED */
+};
+
+const unsigned int cs_dsp_mock_halo_core_base = 0x2b80000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_core_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+const unsigned int cs_dsp_mock_halo_sysinfo_base = 0x25e0000;
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_halo_sysinfo_base, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static const struct regmap_access_table halo_readable = {
+ .yes_ranges = halo_readable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_readable_registers),
+};
+
+static const struct regmap_access_table halo_writeable = {
+ .yes_ranges = halo_writeable_registers,
+ .n_yes_ranges = ARRAY_SIZE(halo_writeable_registers),
+};
+
+static const struct regmap_config cs_dsp_mock_regmap_halo = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .wr_table = &halo_writeable,
+ .rd_table = &halo_readable,
+ .max_register = 0x3804ffc,
+ .reg_defaults = halo_register_defaults,
+ .num_reg_defaults = ARRAY_SIZE(halo_register_defaults),
+ .cache_type = REGCACHE_MAPLE,
+};
+
+/**
+ * cs_dsp_mock_regmap_drop_range() - drop a range of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @last_reg: Address of last register to drop.
+ */
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg)
+{
+ regcache_drop_region(priv->dsp->regmap, first_reg, last_reg);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_range, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_regs() - drop a number of registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_regs: Number of registers to drop.
+ */
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs)
+{
+ int stride = regmap_get_reg_stride(priv->dsp->regmap);
+ unsigned int last = first_reg + (stride * (num_regs - 1));
+
+ cs_dsp_mock_regmap_drop_range(priv, first_reg, last);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_bytes() - drop a number of bytes from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @first_reg: Address of first register to drop.
+ * @num_bytes: Number of bytes to drop from the cache. Will be rounded
+ * down to a whole number of registers. Trailing bytes that
+ * are not a multiple of the register size will not be dropped.
+ * (This is intended to help detect math errors in test code.)
+ */
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes)
+{
+ size_t num_regs = num_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+
+ cs_dsp_mock_regmap_drop_regs(priv, first_reg, num_regs);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_bytes, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_drop_system_regs() - Drop DSP system registers from the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ *
+ * Drops all DSP system registers from the regmap cache.
+ */
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv)
+{
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x7c);
+ }
+ return;
+ case WMFW_HALO:
+ if (priv->dsp->base) {
+ regcache_drop_region(priv->dsp->regmap,
+ priv->dsp->base,
+ priv->dsp->base + 0x47000);
+ }
+
+ /* sysinfo registers are read-only so don't drop them */
+ return;
+ default:
+ return;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_drop_system_regs, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_regmap_is_dirty() - Test for dirty registers in the cache.
+ *
+ * @priv: Pointer to struct cs_dsp_test object.
+ * @drop_system_regs: If true, the DSP system registers will be dropped from
+ * the cache before checking for dirty entries.
+ *
+ * All registers that are expected to be written must have been dropped
+ * from the cache (DSP system registers can be dropped by passing
+ * drop_system_regs == true). If any unexpected registers were written
+ * there will still be dirty entries in the cache and a cache sync will
+ * cause a write.
+ *
+ * Returns: true if there were dirty entries, false if not.
+ */
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs)
+{
+ if (drop_system_regs)
+ cs_dsp_mock_regmap_drop_system_regs(priv);
+
+ priv->saw_bus_write = false;
+ regcache_cache_only(priv->dsp->regmap, false);
+ regcache_sync(priv->dsp->regmap);
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return priv->saw_bus_write;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_is_dirty, "FW_CS_DSP_KUNIT_TEST_UTILS");
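+
+/*
+ * Illustrative usage (sketch only, not functional code in this file):
+ * a test drops every register it expected cs_dsp to write and then
+ * verifies that nothing else was touched. reg_addr and len are
+ * placeholders for the register range the test expects to be written.
+ *
+ *     cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + len - 1);
+ *     cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ *     KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+ */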
+
+/**
+ * cs_dsp_mock_regmap_init() - Initialize a mock regmap.
+ *
+ * @priv: Pointer to struct cs_dsp_test object. This must have a
+ * valid pointer to a struct cs_dsp in which the type and
+ * rev fields are set to the type of DSP to be simulated.
+ *
+ * On success, priv->dsp->regmap will point to the created
+ * regmap instance.
+ *
+ * Return: zero on success, else negative error code.
+ */
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv)
+{
+ const struct regmap_config *config;
+ int ret;
+
+ switch (priv->dsp->type) {
+ case WMFW_HALO:
+ config = &cs_dsp_mock_regmap_halo;
+ break;
+ case WMFW_ADSP2:
+ if (priv->dsp->rev == 0)
+ config = &cs_dsp_mock_regmap_adsp2_16bit;
+ else
+ config = &cs_dsp_mock_regmap_adsp2_32bit;
+ break;
+ default:
+ config = NULL;
+ break;
+ }
+
+ priv->dsp->regmap = devm_regmap_init(priv->dsp->dev,
+ &cs_dsp_mock_regmap_bus,
+ priv,
+ config);
+ if (IS_ERR(priv->dsp->regmap)) {
+ ret = PTR_ERR(priv->dsp->regmap);
+ kunit_err(priv->test, "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ /* Put regmap in cache-only so it accumulates the writes done by cs_dsp */
+ regcache_cache_only(priv->dsp->regmap, true);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_regmap_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
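+
+/*
+ * Illustrative setup (sketch only; the values shown are an assumed
+ * example): a test fills in the DSP type and revision before creating
+ * the mock regmap.
+ *
+ *     priv->dsp->type = WMFW_HALO;
+ *     priv->dsp->rev = 0;
+ *     KUNIT_ASSERT_EQ(test, cs_dsp_mock_regmap_init(priv), 0);
+ */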
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
new file mode 100644
index 000000000000..cbd0bf72b7de
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_utils.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("Utilities for Cirrus Logic DSP driver testing");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
diff --git a/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
new file mode 100644
index 000000000000..5a3ac03ac37f
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_mock_wmfw.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// wmfw file builder for cs_dsp KUnit tests.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Buffer large enough for wmfw file content */
+#define CS_DSP_MOCK_WMFW_BUF_SIZE 131072
+
+struct cs_dsp_mock_wmfw_builder {
+ struct cs_dsp_test *test_priv;
+ int format_version;
+ void *buf;
+ size_t buf_size_bytes;
+ void *write_p;
+ size_t bytes_used;
+
+ void *alg_data_header;
+ unsigned int num_coeffs;
+};
+
+struct wmfw_adsp2_halo_header {
+ struct wmfw_header header;
+ struct wmfw_adsp2_sizes sizes;
+ struct wmfw_footer footer;
+} __packed;
+
+struct wmfw_long_string {
+ __le16 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+struct wmfw_short_string {
+ u8 len;
+ u8 data[] __nonstring __counted_by(len);
+} __packed;
+
+KUNIT_DEFINE_ACTION_WRAPPER(vfree_action_wrapper, vfree, void *)
+
+/**
+ * cs_dsp_mock_wmfw_format_version() - Return format version.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Format version.
+ */
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ return builder->format_version;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_format_version, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_get_firmware() - Get struct firmware wrapper for data.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ *
+ * Return: Pointer to a struct firmware wrapper for the data.
+ */
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct firmware *fw;
+
+ if (!builder)
+ return NULL;
+
+ fw = kunit_kzalloc(builder->test_priv->test, sizeof(*fw), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, fw);
+
+ fw->data = builder->buf;
+ fw->size = builder->bytes_used;
+
+ return fw;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_get_firmware, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_raw_block() - Add a block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @block_type: Block type.
+ * @offset: Offset.
+ * @payload_data: Pointer to buffer containing the payload data,
+ * or NULL if no data.
+ * @payload_len_bytes: Length of payload data in bytes, or zero.
+ */
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int block_type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ struct wmfw_region *header = builder->write_p;
+ unsigned int bytes_needed = struct_size_t(struct wmfw_region, data, payload_len_bytes);
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ header->offset = cpu_to_le32(offset | (block_type << 24));
+ header->len = cpu_to_le32(payload_len_bytes);
+ if (payload_len_bytes > 0)
+ memcpy(header->data, payload_data, payload_len_bytes);
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_raw_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_info() - Add an info block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @info: Pointer to info string to be copied into the file.
+ *
+ * The string will be padded to a length that is a multiple of 4 bytes.
+ */
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info)
+{
+ size_t info_len = strlen(info);
+ char *tmp = NULL;
+
+ if (info_len % 4) {
+ /* Create a padded string with length a multiple of 4 */
+ info_len = round_up(info_len, 4);
+ tmp = kunit_kzalloc(builder->test_priv->test, info_len, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(builder->test_priv->test, tmp);
+ /* Copy only the string content; tmp is already zero-padded */
+ memcpy(tmp, info, strlen(info));
+ info = tmp;
+ }
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, WMFW_INFO_TEXT, 0, info, info_len);
+ kunit_kfree(builder->test_priv->test, tmp);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_info, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+/**
+ * cs_dsp_mock_wmfw_add_data_block() - Add a data block to the wmfw file.
+ *
+ * @builder: Pointer to struct cs_dsp_mock_wmfw_builder.
+ * @mem_region: Memory region for the block.
+ * @mem_offset_dsp_words: Offset to start of destination in DSP words.
+ * @payload_data: Pointer to buffer containing the payload data.
+ * @payload_len_bytes: Length of payload data in bytes.
+ */
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes)
+{
+ /* Blob payload length must be a multiple of 4 */
+ KUNIT_ASSERT_EQ(builder->test_priv->test, payload_len_bytes % 4, 0);
+
+ cs_dsp_mock_wmfw_add_raw_block(builder, mem_region, mem_offset_dsp_words,
+ payload_data, payload_len_bytes);
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_data_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description)
+{
+ struct wmfw_region *rgn = builder->write_p;
+ struct wmfw_adsp_alg_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, name_len, description_len;
+ int offset;
+
+ /* Bytes needed for region header */
+ bytes_needed = offsetof(struct wmfw_region, data);
+
+ builder->alg_data_header = builder->write_p;
+ builder->num_coeffs = 0;
+
+ switch (builder->format_version) {
+ case 0:
+ KUNIT_FAIL(builder->test_priv->test, "wmfwV0 does not have alg blocks\n");
+ return;
+ case 1:
+ bytes_needed += offsetof(struct wmfw_adsp_alg_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->id = cpu_to_le32(alg_id);
+ if (name)
+ strscpy(v1->name, name, sizeof(v1->name));
+
+ if (description)
+ strscpy(v1->descr, description, sizeof(v1->descr));
+ break;
+ default:
+ name_len = 0;
+ description_len = 0;
+
+ if (name)
+ name_len = strlen(name);
+
+ if (description)
+ description_len = strlen(description);
+
+ bytes_needed += sizeof(__le32); /* alg id */
+ bytes_needed += round_up(name_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32); /* coeff count */
+
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ memset(builder->write_p, 0, bytes_needed);
+
+ /* Create region header */
+ rgn->offset = cpu_to_le32(WMFW_ALGORITHM_DATA << 24);
+
+ /* Create algorithm entry */
+ *(__force __le32 *)&rgn->data[0] = cpu_to_le32(alg_id);
+
+ shortstring = (struct wmfw_short_string *)&rgn->data[4];
+ shortstring->len = name_len;
+
+ if (name_len)
+ memcpy(shortstring->data, name, name_len);
+
+ /* Round up to next __le32 */
+ offset = round_up(4 + struct_size_t(struct wmfw_short_string, data, name_len),
+ sizeof(__le32));
+
+ longstring = (struct wmfw_long_string *)&rgn->data[offset];
+ longstring->len = cpu_to_le16(description_len);
+
+ if (description_len)
+ memcpy(longstring->data, description, description_len);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_start_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def)
+{
+ struct wmfw_adsp_coeff_data *v1;
+ struct wmfw_short_string *shortstring;
+ struct wmfw_long_string *longstring;
+ size_t bytes_needed, shortname_len, fullname_len, description_len;
+ __le32 *ple32;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, builder->alg_data_header);
+
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ bytes_needed = offsetof(struct wmfw_adsp_coeff_data, data);
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ v1 = (struct wmfw_adsp_coeff_data *)builder->write_p;
+ memset(v1, 0, sizeof(*v1));
+ v1->hdr.offset = cpu_to_le16(def->offset_dsp_words);
+ v1->hdr.type = cpu_to_le16(def->mem_type);
+ v1->hdr.size = cpu_to_le32(bytes_needed - sizeof(v1->hdr));
+ v1->ctl_type = cpu_to_le16(def->type);
+ v1->flags = cpu_to_le16(def->flags);
+ v1->len = cpu_to_le32(def->length_bytes);
+
+ if (def->fullname)
+ strscpy(v1->name, def->fullname, sizeof(v1->name));
+
+ if (def->description)
+ strscpy(v1->descr, def->description, sizeof(v1->descr));
+ break;
+ default:
+ fullname_len = 0;
+ description_len = 0;
+ shortname_len = strlen(def->shortname);
+
+ if (def->fullname)
+ fullname_len = strlen(def->fullname);
+
+ if (def->description)
+ description_len = strlen(def->description);
+
+ bytes_needed = sizeof(__le32) * 2; /* type, offset and size */
+ bytes_needed += round_up(shortname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(fullname_len + sizeof(u8), sizeof(__le32));
+ bytes_needed += round_up(description_len + sizeof(__le16), sizeof(__le32));
+ bytes_needed += sizeof(__le32) * 2; /* flags, type and length */
+ KUNIT_ASSERT_TRUE(builder->test_priv->test,
+ (builder->write_p + bytes_needed) <
+ (builder->buf + CS_DSP_MOCK_WMFW_BUF_SIZE));
+
+ ple32 = (__force __le32 *)builder->write_p;
+ *ple32++ = cpu_to_le32(def->offset_dsp_words | (def->mem_type << 16));
+ *ple32++ = cpu_to_le32(bytes_needed - sizeof(__le32) - sizeof(__le32));
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = shortname_len;
+ memcpy(shortstring->data, def->shortname, shortname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, shortname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ shortstring = (__force struct wmfw_short_string *)ple32;
+ shortstring->len = fullname_len;
+ memcpy(shortstring->data, def->fullname, fullname_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_short_string, data, fullname_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ longstring = (__force struct wmfw_long_string *)ple32;
+ longstring->len = cpu_to_le16(description_len);
+ memcpy(longstring->data, def->description, description_len);
+
+ /* Round up to next __le32 multiple */
+ ple32 += round_up(struct_size_t(struct wmfw_long_string, data, description_len),
+ sizeof(*ple32)) / sizeof(*ple32);
+
+ *ple32++ = cpu_to_le32(def->type | (def->flags << 16));
+ *ple32 = cpu_to_le32(def->length_bytes);
+ break;
+ }
+
+ builder->write_p += bytes_needed;
+ builder->bytes_used += bytes_needed;
+ builder->num_coeffs++;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_add_coeff_desc, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_region *rgn = builder->alg_data_header;
+ struct wmfw_adsp_alg_data *v1;
+ const struct wmfw_short_string *shortstring;
+ const struct wmfw_long_string *longstring;
+ size_t offset;
+
+ KUNIT_ASSERT_NOT_NULL(builder->test_priv->test, rgn);
+
+ /* Fill in data size */
+ rgn->len = cpu_to_le32((u8 *)builder->write_p - (u8 *)rgn->data);
+
+ /* Fill in coefficient count */
+ switch (builder->format_version) {
+ case 0:
+ return;
+ case 1:
+ v1 = (struct wmfw_adsp_alg_data *)&rgn->data[0];
+ v1->ncoeff = cpu_to_le32(builder->num_coeffs);
+ break;
+ default:
+ offset = 4; /* skip alg id */
+
+ /* Get name length and round up to __le32 multiple */
+ shortstring = (const struct wmfw_short_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_short_string, data, shortstring->len),
+ sizeof(__le32));
+
+ /* Get description length and round up to __le32 multiple */
+ longstring = (const struct wmfw_long_string *)&rgn->data[offset];
+ offset += round_up(struct_size_t(struct wmfw_long_string, data,
+ le16_to_cpu(longstring->len)),
+ sizeof(__le32));
+
+ *(__force __le32 *)&rgn->data[offset] = cpu_to_le32(builder->num_coeffs);
+ break;
+ }
+
+ builder->alg_data_header = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_end_alg_info_block, "FW_CS_DSP_KUNIT_TEST_UTILS");
+
+static void cs_dsp_init_adsp2_halo_wmfw(struct cs_dsp_mock_wmfw_builder *builder)
+{
+ struct wmfw_adsp2_halo_header *hdr = builder->buf;
+ const struct cs_dsp *dsp = builder->test_priv->dsp;
+
+ memcpy(hdr->header.magic, "WMFW", sizeof(hdr->header.magic));
+ hdr->header.len = cpu_to_le32(sizeof(*hdr));
+ hdr->header.ver = builder->format_version;
+ hdr->header.core = dsp->type;
+ hdr->header.rev = cpu_to_le16(dsp->rev);
+
+ hdr->sizes.pm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_PM));
+ hdr->sizes.xm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_XM));
+ hdr->sizes.ym = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_YM));
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ hdr->sizes.zm = cpu_to_le32(cs_dsp_mock_size_of_region(dsp, WMFW_ADSP2_ZM));
+ break;
+ default:
+ break;
+ }
+
+ builder->write_p = &hdr[1];
+ builder->bytes_used += sizeof(*hdr);
+}
+
+/**
+ * cs_dsp_mock_wmfw_init() - Initialize a struct cs_dsp_mock_wmfw_builder.
+ *
+ * @priv: Pointer to struct cs_dsp_test.
+ * @format_version: Required wmfw format version.
+ *
+ * Return: Pointer to created struct cs_dsp_mock_wmfw_builder.
+ */
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version)
+{
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ /* If a format version isn't given, use the default for the target core */
+ if (format_version < 0) {
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ format_version = 2;
+ break;
+ default:
+ format_version = 3;
+ break;
+ }
+ }
+
+ builder = kunit_kzalloc(priv->test, sizeof(*builder), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(priv->test, builder);
+
+ builder->test_priv = priv;
+ builder->format_version = format_version;
+
+ builder->buf = vmalloc(CS_DSP_MOCK_WMFW_BUF_SIZE);
+ KUNIT_ASSERT_NOT_NULL(priv->test, builder->buf);
+ kunit_add_action_or_reset(priv->test, vfree_action_wrapper, builder->buf);
+
+ builder->buf_size_bytes = CS_DSP_MOCK_WMFW_BUF_SIZE;
+
+ switch (priv->dsp->type) {
+ case WMFW_ADSP2:
+ case WMFW_HALO:
+ cs_dsp_init_adsp2_halo_wmfw(builder);
+ break;
+ default:
+ break;
+ }
+
+ return builder;
+}
+EXPORT_SYMBOL_NS_GPL(cs_dsp_mock_wmfw_init, "FW_CS_DSP_KUNIT_TEST_UTILS");
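+
+/*
+ * Illustrative build sequence (sketch only): a test creates a builder,
+ * adds payload blocks, then wraps the result in a struct firmware for
+ * cs_dsp. The payload variable is a placeholder.
+ *
+ *     builder = cs_dsp_mock_wmfw_init(priv, -1);
+ *     cs_dsp_mock_wmfw_add_data_block(builder, WMFW_ADSP2_YM, 0,
+ *                                     payload, sizeof(payload));
+ *     wmfw = cs_dsp_mock_wmfw_get_firmware(builder);
+ */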
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
new file mode 100644
index 000000000000..1e161bbc5b4a
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin.c
@@ -0,0 +1,2556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/firmware.h>
+#include <linux/math.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create an XM header with an algorithm list in the cached regmap.
+ * 3) Create a dummy wmfw file to satisfy cs_dsp.
+ * 4) Create the bin file content.
+ * 5) Call cs_dsp_power_up() with the bin file.
+ * 6) Read back the cached values of the registers that should have been
+ *    written and check that they have the correct values.
+ * 7) All the registers that are expected to have been written are dropped from
+ * the cache (including the XM header). This should leave the cache clean.
+ * 8) If the cache is still dirty, there have been unexpected writes.
+ *
+ * There are multiple different schemes used for addressing across
+ * ADSP2 and Halo Core DSPs:
+ *
+ * dsp words: The addressing scheme used by the DSP; pointers and lengths
+ * in DSP memory use this. A memory region (XM, YM, ZM) is
+ * also required to create a unique DSP memory address.
+ * registers: Addresses in the register map. Older ADSP2 devices have
+ * 16-bit registers with an address stride of 1. Newer ADSP2
+ * devices have 32-bit registers with an address stride of 2.
+ * Halo Core devices have 32-bit registers with a stride of 4.
+ * unpacked: Registers that have a 1:1 mapping to DSP words
+ * packed: Registers that pack multiple DSP words more efficiently into
+ * multiple 32-bit registers. Because of this the relationship
+ * between a packed _register_ address and the corresponding
+ * _dsp word_ address is different from unpacked registers.
+ * Packed registers can only be accessed as a group of
+ * multiple registers, therefore can only read/write a group
+ * of multiple DSP words.
+ * Packed registers only exist on Halo Core DSPs.
+ *
+ * Addresses can also be relative to the start of an algorithm, and this
+ * can be expressed in dsp words, register addresses, or bytes.
+ */
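+
+/*
+ * Worked example of the addressing above (illustrative only): on a Halo
+ * Core DSP the unpacked XM register for DSP word N is XM_base + (N * 4),
+ * while the packed XM registers holding the group of four words that
+ * starts at word N (N a multiple of 4) begin at
+ * XM_PACKED_base + ((N * 3) / 4) * 4, since there are three packed
+ * registers for every four DSP words.
+ */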
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+};
+
+struct bin_test_param {
+ const char *name;
+ int mem_type;
+ unsigned int offset_words;
+ int alg_idx;
+};
+
+static const struct cs_dsp_mock_alg_def bin_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xfbfb,
+ .ver = 0x100000,
+ .xm_size_words = 99,
+ .ym_size_words = 99,
+ .zm_size_words = 99,
+ },
+ {
+ .id = 0xc321,
+ .ver = 0x100000,
+ .xm_size_words = 120,
+ .ym_size_words = 120,
+ .zm_size_words = 120,
+ },
+ {
+ .id = 0xb123,
+ .ver = 0x100000,
+ .xm_size_words = 96,
+ .ym_size_words = 96,
+ .zm_size_words = 96,
+ },
+};
+
+/*
+ * Convert number of DSP words to number of packed registers rounded
+ * down to the nearest register.
+ * There are 3 registers for every 4 packed words.
+ */
+static unsigned int _num_words_to_num_packed_regs(unsigned int num_dsp_words)
+{
+ return (num_dsp_words * 3) / 4;
+}
+
+/* bin file that patches a single DSP word */
+static void bin_patch_one_word(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with a single payload that patches consecutive words */
+static void bin_patch_one_multiword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple one-word payloads that patch consecutive words */
+static void bin_patch_multi_oneword(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(payload_data); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + i) * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads that patch a block of consecutive
+ * words but the payloads are not in address order.
+ */
+static void bin_patch_multi_oneword_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 payload_data[16], readback[16];
+ static const u8 word_order[] = { 10, 2, 12, 4, 0, 11, 6, 1, 3, 15, 5, 13, 8, 7, 9, 14 };
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(readback) == ARRAY_SIZE(payload_data));
+ static_assert(ARRAY_SIZE(word_order) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(word_order); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (param->offset_words + word_order[i]) *
+ reg_inc_per_word,
+ &payload_data[word_order[i]], sizeof(payload_data[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, sizeof(payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr,
+ reg_addr + (reg_inc_per_word * ARRAY_SIZE(payload_data)));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple one-word payloads. The payloads are not in address
+ * order and collectively do not patch a contiguous block of memory.
+ */
+static void bin_patch_multi_oneword_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ static const u8 word_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ u32 payload_data[44];
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(payload_data));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+
+ /* Add one payload per word */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ word_offsets[i] * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + word_offsets[i]) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &reg_val, &payload_data[i], sizeof(reg_val));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_one_word_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ unsigned int alg_xm_base_words, alg_ym_base_words, alg_zm_base_words;
+ unsigned int reg_addr;
+ u32 payload_data[3];
+ struct firmware *fw;
+ u32 reg_val;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_XM);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_YM);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_zm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_ADSP2_ZM);
+ } else {
+ alg_zm_base_words = 0;
+ }
+
+ /* Add words to XM, YM and ZM */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_XM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[0], sizeof(payload_data[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_YM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[1], sizeof(payload_data[1]));
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_ADSP2_ZM,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[2], sizeof(payload_data[2]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM) +
+ ((alg_xm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[0]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM) +
+ ((alg_ym_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[1]);
+
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM) +
+ ((alg_zm_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[2]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ */
+static void bin_patch_one_word_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single DSP word in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_one_word_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 payload_data[ARRAY_SIZE(bin_test_mock_algs)];
+ unsigned int alg_base_words;
+ unsigned int reg_inc_per_word, reg_addr;
+ struct firmware *fw;
+ u32 reg_val;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ /* Add one payload per algorithm */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ param->offset_words * reg_inc_per_word,
+ &payload_data[i], sizeof(payload_data[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ ((alg_base_words + param->offset_words) * reg_inc_per_word);
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &reg_val,
+ sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_range(priv, reg_addr, reg_addr + reg_inc_per_word - 1);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file that patches a single packed block of DSP words */
+static void bin_patch_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is one word longer than a packed block using one
+ * packed block followed by one unpacked word.
+ */
+static void bin_patch_1_packed_1_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked word following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by two blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_2_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by three blocks of one unpacked word.
+ */
+static void bin_patch_1_packed_3_single_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payloads[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payloads));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payloads, sizeof(unpacked_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ &unpacked_payloads[0], sizeof(unpacked_payloads[0]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 5) - alg_base_words) * 4,
+ &unpacked_payloads[1], sizeof(unpacked_payloads[1]));
+
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 6) - alg_base_words) * 4,
+ &unpacked_payloads[2], sizeof(unpacked_payloads[2]));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payloads */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payloads)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payloads, sizeof(unpacked_payloads));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is two words longer than a packed block using one
+ * packed block followed by a block of two unpacked words.
+ */
+static void bin_patch_1_packed_2_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that is three words longer than a packed block using one
+ * packed block followed by a block of three unpacked words.
+ */
+static void bin_patch_1_packed_3_trailing(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ /* Patch packed block */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ /* ... and the unpacked words following that */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((patch_pos_words + 4) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (patch_pos_words + 4) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts one word before a packed boundary using one
+ * unpacked word followed by one packed block.
+ */
+static void bin_patch_1_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[1], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+ memset(readback, 0, sizeof(readback));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
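+ /* (the extra boundary leaves room for the leading unpacked word before the packed block) */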
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked word */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 1) * 4;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using two
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_2_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts two words before a packed boundary using one
+ * block of two unpacked words followed by one packed block.
+ */
+static void bin_patch_2_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[2], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 2) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using three
+ * unpacked words followed by one packed block.
+ */
+static void bin_patch_3_single_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ &unpacked_payload[0], sizeof(unpacked_payload[0]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 2) - alg_base_words) * 4,
+ &unpacked_payload[1], sizeof(unpacked_payload[1]));
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 1) - alg_base_words) * 4,
+ &unpacked_payload[2], sizeof(unpacked_payload[2]));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Patch data that starts three words before a packed boundary using one
+ * block of three unpacked words followed by one packed block.
+ */
+static void bin_patch_3_leading_1_packed(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ unsigned int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ u32 packed_payload[3], unpacked_payload[3], readback[3];
+ unsigned int alg_base_words, packed_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+ static_assert(sizeof(readback) >= sizeof(unpacked_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+ get_random_bytes(unpacked_payload, sizeof(unpacked_payload));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round packed start word up to a packed boundary and move to the next boundary */
+ packed_patch_pos_words = round_up(alg_base_words + param->offset_words, 4) + 4;
+
+ /* Patch the leading unpacked words */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ unpacked_mem_type,
+ ((packed_patch_pos_words - 3) - alg_base_words) * 4,
+ unpacked_payload, sizeof(unpacked_payload));
+ /* ... then the packed block */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(packed_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ &packed_payload, sizeof(packed_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, &packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload));
+
+ /* Content of unpacked registers should match unpacked_payload */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ (packed_patch_pos_words - 3) * 4;
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, &readback,
+ sizeof(unpacked_payload)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, &readback, unpacked_payload, sizeof(unpacked_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* bin file with multiple payloads that each patch one packed block. */
+static void bin_patch_multi_onepacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
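+ /* The blocks are contiguous: each iteration advances by one packed block (4 words) */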
+ for (i = 0; i < ARRAY_SIZE(packed_payloads); ++i) {
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words + (i * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i], sizeof(packed_payloads[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order.
+ */
+static void bin_patch_multi_onepacked_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 payload_order[] = { 4, 3, 6, 1, 0, 7, 5, 2 };
+ u32 packed_payloads[8][3], readback[8][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(payload_order) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(payload_order); ++i) {
+ patch_pos_in_packed_regs =
+ _num_words_to_num_packed_regs(patch_pos_words + (payload_order[i] * 4));
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[payload_order[i]],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content in registers should match the order of data in packed_payloads */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads, sizeof(packed_payloads));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads));
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file with multiple payloads that each patch one packed block.
+ * The payloads are not in address order. The patched memory is not contiguous.
+ */
+static void bin_patch_multi_onepacked_sparse_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 word_offsets[] = { 60, 24, 76, 4, 40, 52, 48, 36, 12 };
+ u32 packed_payloads[9][3], readback[3];
+ unsigned int alg_base_words, alg_base_in_packed_regs;
+ unsigned int patch_pos_words, patch_pos_in_packed_regs, payload_offset;
+ unsigned int reg_addr;
+ struct firmware *fw;
+ int i;
+
+ static_assert(ARRAY_SIZE(word_offsets) == ARRAY_SIZE(packed_payloads));
+ static_assert(sizeof(readback) == sizeof(packed_payloads[0]));
+
+ get_random_bytes(packed_payloads, sizeof(packed_payloads));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Add one payload per packed block */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ &packed_payloads[i],
+ sizeof(packed_payloads[0]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed registers should match packed_payloads */
+ for (i = 0; i < ARRAY_SIZE(word_offsets); ++i) {
+ patch_pos_words = round_up(alg_base_words + word_offsets[i], 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payloads[i], sizeof(packed_payloads[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payloads[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in each of the memory regions
+ * of one algorithm.
+ */
+static void bin_patch_1_packed_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_xm_payload[3], packed_ym_payload[3], readback[3];
+ unsigned int alg_xm_base_words, alg_ym_base_words;
+ unsigned int xm_patch_pos_words, ym_patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr;
+ struct firmware *fw;
+
+ static_assert(sizeof(readback) == sizeof(packed_xm_payload));
+ static_assert(sizeof(readback) == sizeof(packed_ym_payload));
+
+ get_random_bytes(packed_xm_payload, sizeof(packed_xm_payload));
+ get_random_bytes(packed_ym_payload, sizeof(packed_ym_payload));
+
+ alg_xm_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_XM_PACKED);
+ alg_ym_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[param->alg_idx].id,
+ WMFW_HALO_YM_PACKED);
+
+ /* Round patch start word up to a packed boundary */
+ xm_patch_pos_words = round_up(alg_xm_base_words + param->offset_words, 4);
+ ym_patch_pos_words = round_up(alg_ym_base_words + param->offset_words, 4);
+
+ /* Add XM and YM patches */
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_xm_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_XM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_xm_payload, sizeof(packed_xm_payload));
+
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_ym_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[param->alg_idx].id,
+ bin_test_mock_algs[param->alg_idx].ver,
+ WMFW_HALO_YM_PACKED,
+ (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4,
+ packed_ym_payload, sizeof(packed_ym_payload));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of packed XM registers should match packed_xm_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(xm_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_XM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_xm_payload, sizeof(packed_xm_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_xm_payload));
+
+ /* Content of packed YM registers should match packed_ym_payload */
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(ym_patch_pos_words);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_YM_PACKED) +
+ (patch_pos_in_packed_regs * 4);
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_ym_payload, sizeof(packed_ym_payload));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_ym_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ */
+static void bin_patch_1_packed_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i;
+
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /* For each algorithm patch one packed block to values from packed_payload */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[i].id,
+ bin_test_mock_algs[i].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+ * Read back the registers that should have been written. Place
+ * the values into the expected locations in readback[] so that
+ * the content of readback[] matches packed_payload[].
+ */
+ for (i = 0; i < ARRAY_SIZE(bin_test_mock_algs); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[i].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that patches a single packed block in multiple algorithms.
+ * The algorithms are not patched in the same order they appear in the XM header.
+ */
+static void bin_patch_1_packed_multiple_algs_unordered(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 alg_order[] = { 3, 0, 2, 1 };
+ u32 packed_payload[ARRAY_SIZE(bin_test_mock_algs)][3];
+ u32 readback[ARRAY_SIZE(bin_test_mock_algs)][3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ struct firmware *fw;
+ int i, alg_idx;
+
+ static_assert(ARRAY_SIZE(alg_order) == ARRAY_SIZE(bin_test_mock_algs));
+ static_assert(sizeof(readback) == sizeof(packed_payload));
+
+ get_random_bytes(packed_payload, sizeof(packed_payload));
+
+ /*
+ * For each algorithm index in alg_order[] patch one packed block in
+ * that algorithm to values from packed_payload.
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+
+ /* Round patch start word up to a packed boundary */
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[alg_idx].id,
+ bin_test_mock_algs[alg_idx].ver,
+ param->mem_type,
+ payload_offset,
+ packed_payload[i], sizeof(packed_payload[i]));
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ memset(readback, 0, sizeof(readback));
+
+ /*
+ * Read back the registers that should have been written. Place
+ * the values into the expected locations in readback[] so that
+ * the content of readback[] matches packed_payload[].
+ */
+ for (i = 0; i < ARRAY_SIZE(alg_order); ++i) {
+ alg_idx = alg_order[i];
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[alg_idx].id,
+ param->mem_type);
+
+ patch_pos_words = round_up(alg_base_words + param->offset_words, 4);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ readback[i], sizeof(readback[i])),
+ 0);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(packed_payload[i]));
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload, sizeof(packed_payload));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * bin file that contains a mix of packed and unpacked words.
+ * Payloads are in random offset order. Offsets that are on a packed boundary
+ * are written as a packed block. Offsets that are not on a packed boundary
+ * are written as a single unpacked word.
+ */
+static void bin_patch_mixed_packed_unpacked_random(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ const struct bin_test_param *param = test->param_value;
+ static const u8 offset_words[] = {
+ 58, 68, 50, 10, 44, 17, 74, 36, 8, 7, 49, 11, 78, 57, 65, 2,
+ 48, 38, 22, 70, 77, 21, 61, 56, 75, 34, 27, 3, 31, 20, 43, 63,
+ 5, 30, 32, 25, 33, 79, 29, 0, 37, 60, 69, 52, 13, 12, 24, 26,
+ 4, 51, 76, 72, 16, 6, 39, 62, 15, 41, 28, 73, 53, 40, 45, 54,
+ 14, 55, 46, 66, 64, 59, 23, 9, 67, 47, 19, 71, 35, 18, 42, 1,
+ };
+ struct {
+ u32 packed[80][3];
+ u32 unpacked[80];
+ } *payload;
+ u32 readback[3];
+ unsigned int alg_base_words, patch_pos_words;
+ unsigned int alg_base_in_packed_regs, patch_pos_in_packed_regs;
+ unsigned int reg_addr, payload_offset;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ struct firmware *fw;
+ int i;
+
+ payload = kunit_kmalloc(test, sizeof(*payload), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, payload);
+
+ get_random_bytes(payload->packed, sizeof(payload->packed));
+ get_random_bytes(payload->unpacked, sizeof(payload->unpacked));
+
+ /* Create a patch entry for every offset in offset_words[] */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ /*
+ * If the offset is on a packed boundary use a packed payload else
+ * use an unpacked word
+ */
+ patch_pos_words = alg_base_words + offset_words[i];
+ if ((patch_pos_words % 4) == 0) {
+ alg_base_in_packed_regs = _num_words_to_num_packed_regs(alg_base_words);
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+ payload_offset = (patch_pos_in_packed_regs - alg_base_in_packed_regs) * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ param->mem_type,
+ payload_offset,
+ payload->packed[i],
+ sizeof(payload->packed[i]));
+ } else {
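+ /* Unpacked offsets are simply 4 bytes per DSP word from the algorithm base */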
+ payload_offset = offset_words[i] * 4;
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ unpacked_mem_type,
+ payload_offset,
+ &payload->unpacked[i],
+ sizeof(payload->unpacked[i]));
+ }
+ }
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /*
+ * Read back each packed block that should have been written and
+ * check that its content matches the corresponding entry in
+ * payload->packed[].
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ param->mem_type);
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is not on a packed boundary */
+ if ((patch_pos_words % 4) != 0)
+ continue;
+
+ patch_pos_in_packed_regs = _num_words_to_num_packed_regs(patch_pos_words);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type) +
+ (patch_pos_in_packed_regs * 4);
+
+ memset(readback, 0, sizeof(readback));
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(readback)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload->packed[i], sizeof(payload->packed[i]));
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->packed[i]));
+ }
+
+ /*
+ * Read back each unpacked word that should have been written and
+ * check that it matches the corresponding entry in
+ * payload->unpacked[].
+ */
+ for (i = 0; i < ARRAY_SIZE(offset_words); ++i) {
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ unpacked_mem_type);
+
+ patch_pos_words = alg_base_words + offset_words[i];
+
+ /* Skip if the offset is on a packed boundary */
+ if ((patch_pos_words % 4) == 0)
+ continue;
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type) +
+ ((patch_pos_words) * 4);
+
+ readback[0] = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[0], sizeof(readback[0])),
+ 0);
+ KUNIT_EXPECT_EQ(test, readback[0], payload->unpacked[i]);
+
+ /* Drop expected writes from the cache */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(payload->unpacked[i]));
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Bin file with a name block and multiple info blocks */
+static void bin_patch_name_and_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ unsigned int reg_inc_per_word = cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ u32 reg_val, payload_data;
+ char *infobuf;
+ unsigned int alg_base_words, reg_addr;
+ struct firmware *fw;
+
+ get_random_bytes(&payload_data, sizeof(payload_data));
+
+ alg_base_words = cs_dsp_mock_xm_header_get_alg_base_in_words(priv,
+ bin_test_mock_algs[0].id,
+ WMFW_ADSP2_YM);
+
+ /* Add a name block and info block */
+ cs_dsp_mock_bin_add_name(priv->local->bin_builder, "The name");
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, "Some info");
+
+ /* Add a big block of info */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
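+ /* Repeatedly append until the 512-byte buffer is full */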
+ for (; strlcat(infobuf, "Waffle{Blah}\n", 512) < 512; )
+ ;
+
+ cs_dsp_mock_bin_add_info(priv->local->bin_builder, infobuf);
+
+ /* Add a patch */
+ cs_dsp_mock_bin_add_patch(priv->local->bin_builder,
+ bin_test_mock_algs[0].id,
+ bin_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM,
+ 0,
+ &payload_data, sizeof(payload_data));
+
+ fw = cs_dsp_mock_bin_get_firmware(priv->local->bin_builder);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, priv->local->wmfw, "mock_wmfw",
+ fw, "mock_bin", "misc"),
+ 0);
+
+ /* Content of registers should match payload_data */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg_addr += alg_base_words * reg_inc_per_word;
+ reg_val = 0;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &reg_val, sizeof(reg_val)),
+ 0);
+ KUNIT_EXPECT_EQ(test, reg_val, payload_data);
+}
+
+static int cs_dsp_bin_test_common_init(struct kunit *test, struct cs_dsp *dsp)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_mock_xm_header *xm_hdr;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!priv->local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /* Create an XM header */
+ xm_hdr = cs_dsp_create_mock_xm_header(priv,
+ bin_test_mock_algs,
+ ARRAY_SIZE(bin_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_hdr);
+ ret = cs_dsp_mock_xm_header_write_to_regmap(xm_hdr);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
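+ /* Create a bin builder with the firmware version reported by the mock XM header */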
+ priv->local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->local->bin_builder);
+
+ /* We must provide a dummy wmfw to load */
+ priv->local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, -1);
+ priv->local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
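+ /* rev 1 gives the 32-bit register ADSP2 variant matching the mock regions below */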
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+static int cs_dsp_bin_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
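+ /* rev 0 gives the 16-bit register ADSP2 variant matching the mock regions below */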
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_test_common_init(test, dsp);
+}
+
+/* Parameterize on choice of XM or YM with a range of word offsets */
+static const struct bin_test_param x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_XM, .offset_words = 20 },
+
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_YM, .offset_words = 20 },
+};
+
+/* Parameterize on ZM with a range of word offsets */
+static const struct bin_test_param z_and_offset_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 23 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 22 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 21 },
+ { .mem_type = WMFW_ADSP2_ZM, .offset_words = 20 },
+};
+
+/* Parameterize on choice of packed XM or YM with a range of word offsets */
+static const struct bin_test_param packed_x_or_y_and_offset_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 12 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 8 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 12 },
+};
+
+static void x_or_y_or_z_and_offset_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s@%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->offset_words);
+}
+
+KUNIT_ARRAY_PARAM(x_or_y_and_offset,
+ x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(z_and_offset,
+ z_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_offset,
+ packed_x_or_y_and_offset_param_cases,
+ x_or_y_or_z_and_offset_param_desc);
+
+/* Parameterize on choice of packed XM or YM */
+static const struct bin_test_param packed_x_or_y_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .offset_words = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .offset_words = 0 },
+};
+
+static void x_or_y_or_z_param_desc(const struct bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(packed_x_or_y, packed_x_or_y_param_cases, x_or_y_or_z_param_desc);
+
+static const struct bin_test_param offset_param_cases[] = {
+ { .offset_words = 0 },
+ { .offset_words = 1 },
+ { .offset_words = 2 },
+ { .offset_words = 3 },
+ { .offset_words = 4 },
+ { .offset_words = 23 },
+ { .offset_words = 22 },
+ { .offset_words = 21 },
+ { .offset_words = 20 },
+};
+
+static void offset_param_desc(const struct bin_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "@%u", param->offset_words);
+}
+
+KUNIT_ARRAY_PARAM(offset, offset_param_cases, offset_param_desc);
+
+static const struct bin_test_param alg_param_cases[] = {
+ { .alg_idx = 0 },
+ { .alg_idx = 1 },
+ { .alg_idx = 2 },
+ { .alg_idx = 3 },
+};
+
+static void alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg[%u] (%#x)",
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(alg, alg_param_cases, alg_param_desc);
+
+static const struct bin_test_param x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .alg_idx = 3 },
+
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .alg_idx = 3 },
+};
+
+static void x_or_y_or_z_and_alg_param_desc(const struct bin_test_param *param, char *desc)
+{
+ WARN_ON(param->alg_idx >= ARRAY_SIZE(bin_test_mock_algs));
+
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s alg[%u] (%#x)",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->alg_idx, bin_test_mock_algs[param->alg_idx].id);
+}
+
+KUNIT_ARRAY_PARAM(x_or_y_and_alg, x_or_y_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param z_and_alg_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 0 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(z_and_alg, z_and_alg_param_cases, x_or_y_or_z_and_alg_param_desc);
+
+static const struct bin_test_param packed_x_or_y_and_alg_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .alg_idx = 3 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 0 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .alg_idx = 3 },
+};
+
+KUNIT_ARRAY_PARAM(packed_x_or_y_and_alg, packed_x_or_y_and_alg_param_cases,
+ x_or_y_or_z_and_alg_param_desc);
+
+static struct kunit_case cs_dsp_bin_test_cases_halo[] = {
+ /* Unpacked memory */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* Packed memory tests */
+ KUNIT_CASE_PARAM(bin_patch_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_1_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_single_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_2_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_3_trailing,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_2_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_single_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_3_leading_1_packed,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_mems, alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_onepacked_sparse_unordered,
+ packed_x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_1_packed_multiple_algs_unordered,
+ packed_x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_mixed_packed_unpacked_random,
+ packed_x_or_y_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_bin_test_cases_adsp2[] = {
+ /* XM and YM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, x_or_y_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, x_or_y_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, x_or_y_and_offset_gen_params),
+
+ /* ZM */
+ KUNIT_CASE_PARAM(bin_patch_one_word, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_multiword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_unordered, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_multi_oneword_sparse_unordered, z_and_alg_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs, z_and_offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_algs_unordered, z_and_offset_gen_params),
+
+ /* Other */
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, offset_gen_params),
+ KUNIT_CASE_PARAM(bin_patch_one_word_multiple_mems, alg_gen_params),
+
+ KUNIT_CASE(bin_patch_name_and_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_test_halo = {
+ .name = "cs_dsp_bin_halo",
+ .init = cs_dsp_bin_test_halo_init,
+ .test_cases = cs_dsp_bin_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_adsp2_32bit",
+ .init = cs_dsp_bin_test_adsp2_32bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_adsp2_16bit",
+ .init = cs_dsp_bin_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_bin_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_test_halo,
+ &cs_dsp_bin_test_adsp2_32bit,
+ &cs_dsp_bin_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
new file mode 100644
index 000000000000..5dcf62f19faf
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_bin_builder *bin_builder;
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ struct firmware *wmfw;
+ int wmfw_version;
+};
+
+struct cs_dsp_bin_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_bin_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/* Load a bin containing unknown blocks. They should be skipped. */
+static void bin_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the bin */
+ get_random_bytes(random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xf500, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ 0xc300, 0,
+ random_data, sizeof(random_data));
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a bin that doesn't have a valid magic marker. */
+static void bin_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+
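+ /*
+ * Overwrite the magic marker: first with the wmfw "WMFW" magic, then
+ * with each character corrupted in turn, and finally with zeros.
+ */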
+ memcpy((void *)bin->data, "WMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "xMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WxDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMxR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memcpy((void *)bin->data, "WMDx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ memset((void *)bin->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Load a bin that is too short for a valid header. */
+static void bin_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ do {
+ bin->size--;
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ } while (bin->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void bin_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ unsigned int real_len, len;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ real_len = le32_to_cpu(header->len);
+
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* Wrong core type in header. */
+static void bin_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+
+ header->core_ver = cpu_to_le32(0);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(priv->dsp->type + 1);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ header->core_ver = cpu_to_le32(0xff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void bin_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ unsigned int header_length;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header_length = bin->size;
+ kunit_kfree(test, bin);
+
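+ /*
+ * Add a block with an empty payload so that the file ends immediately
+ * after the new block header.
+ */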
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ NULL, 0);
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ KUNIT_ASSERT_GT(test, bin->size, header_length);
+
+ for (bin->size--; bin->size > header_length; bin->size--) {
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* File too short to contain the block payload */
+static void bin_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ static const u8 payload[256] = { };
+ int i;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
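+ /*
+ * Truncate the file byte-by-byte through the payload. Every truncated
+ * length should be rejected.
+ */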
+ for (i = 0; i < sizeof(payload); i++) {
+ bin->size--;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void bin_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_bin_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *bin;
+ struct wmfw_coeff_hdr *header;
+ struct wmfw_coeff_item *block;
+ u32 payload = 0;
+
+ /* Sanity-check that the wmfw loads ok without the bin */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ cs_dsp_mock_bin_add_raw_block(local->bin_builder,
+ cs_dsp_bin_err_test_mock_algs[0].id,
+ cs_dsp_bin_err_test_mock_algs[0].ver,
+ param->block_type, 0,
+ &payload, sizeof(payload));
+
+ bin = cs_dsp_mock_bin_get_firmware(local->bin_builder);
+ header = (struct wmfw_coeff_hdr *)bin->data;
+ block = (struct wmfw_coeff_item *)&bin->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the bin */
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(block->type), param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(block->len), sizeof(payload));
+
+ block->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+
+ block->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, local->wmfw, "wmfw", bin, "bin", "misc"),
+ 0);
+}
+
+static void cs_dsp_bin_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_bin_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least one algorithm, so
+ * create a dummy one that tests can use and extract it as a data payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_bin_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_bin_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ local->wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ local->bin_builder =
+ cs_dsp_mock_bin_init(priv, 1,
+ cs_dsp_mock_xm_header_get_fw_version_from_regmap(priv));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->bin_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when the test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_bin_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_bin_err_test_adsp2_32bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_bin_err_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_bin_err_test_common_init(test, dsp, 1);
+}
+
+static struct kunit_case cs_dsp_bin_err_test_cases_halo[] = {
+
+ { } /* terminator */
+};
+
+static void cs_dsp_bin_err_block_types_desc(const struct cs_dsp_bin_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+/* Some block types to test against, including illegal types */
+static const struct cs_dsp_bin_test_param bin_test_block_types_cases[] = {
+ { .block_type = WMFW_INFO_TEXT << 8 },
+ { .block_type = WMFW_METADATA << 8 },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_XM },
+ { .block_type = 0x33 },
+ { .block_type = 0xf500 },
+ { .block_type = 0xc000 },
+};
+
+KUNIT_ARRAY_PARAM(bin_test_block_types,
+ bin_test_block_types_cases,
+ cs_dsp_bin_err_block_types_desc);
+
+static struct kunit_case cs_dsp_bin_err_test_cases_adsp2[] = {
+ KUNIT_CASE(bin_load_with_unknown_blocks),
+ KUNIT_CASE(bin_err_wrong_magic),
+ KUNIT_CASE(bin_err_too_short_for_header),
+ KUNIT_CASE(bin_err_bad_header_length),
+ KUNIT_CASE(bin_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(bin_too_short_for_block_header, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_too_short_for_block_payload, bin_test_block_types_gen_params),
+ KUNIT_CASE_PARAM(bin_block_payload_len_garbage, bin_test_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_halo = {
+ .name = "cs_dsp_bin_err_halo",
+ .init = cs_dsp_bin_err_test_halo_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_32bit = {
+ .name = "cs_dsp_bin_err_adsp2_32bit",
+ .init = cs_dsp_bin_err_test_adsp2_32bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_bin_err_test_adsp2_16bit = {
+ .name = "cs_dsp_bin_err_adsp2_16bit",
+ .init = cs_dsp_bin_err_test_adsp2_16bit_init,
+ .exit = cs_dsp_bin_err_test_exit,
+ .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_bin_err_test_halo,
+ &cs_dsp_bin_err_test_adsp2_32bit,
+ &cs_dsp_bin_err_test_adsp2_16bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
new file mode 100644
index 000000000000..8a9b66a3b7d3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
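+/*
+ * Register and status bit used by the watchdog tests to fake a watchdog
+ * timeout indication before reporting a bus error to cs_dsp.
+ */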
+#define ADSP2_LOCK_REGION_CTRL 0x7A
+#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+
+ int num_control_add;
+ int num_control_remove;
+ int num_pre_run;
+ int num_post_run;
+ int num_pre_stop;
+ int num_post_stop;
+ int num_watchdog_expired;
+
+ struct cs_dsp_coeff_ctl *passed_ctl[16];
+ struct cs_dsp *passed_dsp;
+};
+
+struct cs_dsp_callbacks_test_param {
+ const struct cs_dsp_client_ops *ops;
+ const char *case_name;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_callbacks_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+static int cs_dsp_test_control_add_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_add] = ctl;
+ local->num_control_add++;
+
+ return 0;
+}
+
+static void cs_dsp_test_control_remove_callback(struct cs_dsp_coeff_ctl *ctl)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_ctl[local->num_control_remove] = ctl;
+ local->num_control_remove++;
+}
+
+static int cs_dsp_test_pre_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_run++;
+
+ return 0;
+}
+
+static int cs_dsp_test_post_run_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_run++;
+
+ return 0;
+}
+
+static void cs_dsp_test_pre_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_pre_stop++;
+}
+
+static void cs_dsp_test_post_stop_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_post_stop++;
+}
+
+static void cs_dsp_test_watchdog_expired_callback(struct cs_dsp *dsp)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+
+ local->passed_dsp = dsp;
+ local->num_watchdog_expired++;
+}
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_client_ops = {
+ .control_add = cs_dsp_test_control_add_callback,
+ .control_remove = cs_dsp_test_control_remove_callback,
+ .pre_run = cs_dsp_test_pre_run_callback,
+ .post_run = cs_dsp_test_post_run_callback,
+ .pre_stop = cs_dsp_test_pre_stop_callback,
+ .post_stop = cs_dsp_test_post_stop_callback,
+ .watchdog_expired = cs_dsp_test_watchdog_expired_callback,
+};
+
+static const struct cs_dsp_client_ops cs_dsp_callback_test_empty_client_ops = {
+ /* No entries */
+};
+
+static void cs_dsp_test_run_stop_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 1);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
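+ /* Run and stop again; the callback counts should continue to increment */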
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 1);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+
+ cs_dsp_stop(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 2);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 2);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 2);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+ local->passed_dsp = NULL;
+}
+
+static void cs_dsp_test_ctl_v1_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Add a control for each memory */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ def.shortname = "zm";
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "ym";
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.shortname = "xm";
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 3);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 3);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_ctl_v2_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char name[2] = { };
+ int i;
+
+ /* Add some controls */
+ def.shortname = name;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ for (i = 0; i < ARRAY_SIZE(local->passed_ctl); ++i) {
+ name[0] = 'A' + i;
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* There should have been an add callback for each control */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+
+ /*
+ * Call cs_dsp_remove() and there should be a remove callback
+ * for each control
+ */
+ memset(local->passed_ctl, 0, sizeof(local->passed_ctl));
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ KUNIT_EXPECT_EQ(test, local->num_control_add, ARRAY_SIZE(local->passed_ctl));
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, ARRAY_SIZE(local->passed_ctl));
+
+ i = 0;
+ list_for_each_entry_reverse(ctl, &priv->dsp->ctl_list, list)
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_ctl[i++], ctl);
+}
+
+static void cs_dsp_test_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+
+ /* Add a control */
+ def.shortname = "A";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_callbacks_test_mock_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Run a sequence of ops that would invoke callbacks */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+ cs_dsp_stop(priv->dsp);
+ cs_dsp_remove(priv->dsp);
+
+ /* Prevent double cleanup */
+ kunit_remove_action(priv->test, _cs_dsp_remove_wrapper, priv->dsp);
+
+ /* Something went very wrong if any of our callbacks were called */
+ KUNIT_EXPECT_EQ(test, local->num_control_add, 0);
+ KUNIT_EXPECT_EQ(test, local->num_control_remove, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_run, 0);
+ KUNIT_EXPECT_EQ(test, local->num_pre_stop, 0);
+ KUNIT_EXPECT_EQ(test, local->num_post_stop, 0);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+ /* Notify an interrupt and the watchdog callback should be called */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_adsp2v2_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Set the watchdog timeout bit */
+ regmap_write(priv->dsp->regmap, priv->dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_WDT_TIMEOUT_STS_MASK);
+
+ /* Notify an interrupt, which will look for a watchdog callback */
+ cs_dsp_adsp2_bus_error(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static void cs_dsp_test_halo_watchdog_callback(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Notify an interrupt and the watchdog callback should be called */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 1);
+ KUNIT_EXPECT_PTR_EQ(test, local->passed_dsp, priv->dsp);
+}
+
+static void cs_dsp_test_halo_watchdog_no_callbacks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ KUNIT_EXPECT_EQ(test, cs_dsp_run(priv->dsp), 0);
+
+ /* Notify an interrupt, which will look for a watchdog callback */
+ cs_dsp_halo_wdt_expire(priv->dsp);
+ KUNIT_EXPECT_EQ(test, local->num_watchdog_expired, 0);
+}
+
+static int cs_dsp_callbacks_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ const struct cs_dsp_callbacks_test_param *param = test->param_value;
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ struct cs_dsp_mock_xm_header *xm_header;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least one algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_callbacks_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_callbacks_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ xm_header->blob_data,
+ xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = param->ops;
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when the test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_callbacks_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_callbacks_test_adsp2_32bit_init(struct kunit *test, int rev)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = rev;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v2_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_callbacks_test_adsp2v1_32bit_init(struct kunit *test)
+{
+ return cs_dsp_callbacks_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_callbacks_test_adsp2_16bit_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_callbacks_test_common_init(test, dsp, 1);
+}
+
+static void cs_dsp_callbacks_param_desc(const struct cs_dsp_callbacks_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", param->case_name);
+}
+
+/* Parameterize on different client callback ops tables */
+static const struct cs_dsp_callbacks_test_param cs_dsp_callbacks_ops_cases[] = {
+ { .ops = &cs_dsp_callback_test_client_ops, .case_name = "all ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_callbacks_ops,
+ cs_dsp_callbacks_ops_cases,
+ cs_dsp_callbacks_param_desc);
+
+static const struct cs_dsp_callbacks_test_param cs_dsp_no_callbacks_cases[] = {
+ { .ops = &cs_dsp_callback_test_empty_client_ops, .case_name = "empty ops" },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_no_callbacks,
+ cs_dsp_no_callbacks_cases,
+ cs_dsp_callbacks_param_desc);
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv1_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v1_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_adsp2_wmfwv2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_callbacks_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_run_stop_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_ctl_v2_callbacks, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_adsp2v2_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_adsp2v2_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_watchdog_halo_test_cases[] = {
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_callback, cs_dsp_callbacks_ops_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_test_halo_watchdog_no_callbacks, cs_dsp_no_callbacks_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_halo = {
+ .name = "cs_dsp_callbacks_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_callbacks_halo_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v2_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v2_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2v1_32bit = {
+ .name = "cs_dsp_callbacks_adsp2v1_32bit_wmfwv2",
+ .init = cs_dsp_callbacks_test_adsp2v1_32bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_callbacks_test_adsp2_16bit = {
+ .name = "cs_dsp_callbacks_adsp2_16bit_wmfwv1",
+ .init = cs_dsp_callbacks_test_adsp2_16bit_init,
+ .test_cases = cs_dsp_callbacks_adsp2_wmfwv1_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_adsp2v2_32bit = {
+ .name = "cs_dsp_watchdog_adsp2v2_32bit",
+ .init = cs_dsp_callbacks_test_adsp2v2_32bit_init,
+ .test_cases = cs_dsp_watchdog_adsp2v2_test_cases,
+};
+
+static struct kunit_suite cs_dsp_watchdog_test_halo_32bit = {
+ .name = "cs_dsp_watchdog_halo",
+ .init = cs_dsp_callbacks_test_halo_init,
+ .test_cases = cs_dsp_watchdog_halo_test_cases,
+};
+
+kunit_test_suites(&cs_dsp_callbacks_test_halo,
+ &cs_dsp_callbacks_test_adsp2v2_32bit,
+ &cs_dsp_callbacks_test_adsp2v1_32bit,
+ &cs_dsp_callbacks_test_adsp2_16bit,
+ &cs_dsp_watchdog_test_adsp2v2_32bit,
+ &cs_dsp_watchdog_test_halo_32bit);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
new file mode 100644
index 000000000000..83386cc978e3
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_cache.c
@@ -0,0 +1,3282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_cache_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_cache_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
+static const char * const cs_dsp_ctl_cache_test_fw_names[] = {
+ "misc", "mbc/vss", "haps",
+};
+
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_cache_test_algs); ++i) {
+ if (cs_dsp_ctl_cache_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ return i;
+}
+
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_cache_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
+
+/*
+ * Memory allocated for the control cache must be large enough.
+ * This test creates multiple controls of different sizes, so it only
+ * works with wmfw V2 and later.
+ */
+static void cs_dsp_ctl_v2_cache_alloc(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_size_bytes;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char ctl_name[4];
+ u32 *reg_vals;
+ int num_ctls;
+
+ /* Create some DSP data to initialize the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ alg_size_bytes = cs_dsp_ctl_cache_test_algs[0].ym_size_words *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ reg_vals = kunit_kzalloc(test, alg_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, alg_size_bytes);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls of different sizes */
+ def.mem_type = WMFW_ADSP2_YM;
+ def.shortname = ctl_name;
+ num_ctls = 0;
+ for (def.length_bytes = 4; def.length_bytes <= 64; def.length_bytes += 4) {
+ snprintf(ctl_name, ARRAY_SIZE(ctl_name), "%x", def.length_bytes);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ num_ctls++;
+ def.offset_dsp_words += def.length_bytes / sizeof(u32);
+ }
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&dsp->ctl_list), num_ctls);
+
+ /* Check that the block allocated for the cache is large enough */
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ KUNIT_EXPECT_GE(test, ksize(ctl->cache), ctl->len);
+}
+
+/*
+ * Content of registers backing a control should be read into the
+ * control cache when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * For a non-volatile write-only control the cache should be zero-filled
+ * when the firmware is downloaded.
+ */
+static void cs_dsp_ctl_cache_init_write_only(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *readback, *zeros;
+
+ zeros = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, zeros);
+
+ readback = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create a non-volatile write-only control */
+ def.flags = param->flags & ~WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * The control cache should have been zero-filled so should be
+ * readable through the control.
+ */
+ get_random_bytes(readback, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, zeros, param->len_bytes);
+}
+
+/*
+ * Multiple different firmwares with identical controls.
+ * This is legal because different firmwares could contain the same
+ * algorithm.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fw_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+ /* Create an identical control in each firmware */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware create random content in the register backing
+ * the control. Then download, start, stop and power-down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[0]) == 0)
+ ctl[0] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[1]) == 0)
+ ctl[1] = walkctl;
+ else if (strcmp(walkctl->fw_name, cs_dsp_ctl_cache_test_fw_names[2]) == 0)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Multiple different firmwares with controls identical except for alg id.
+ * This is legal because the controls are qualified by algorithm id.
+ * The control cache should be initialized only with the data from
+ * the firmware containing it.
+ */
+static void cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder[3];
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(reg_vals) == ARRAY_SIZE(builder));
+ static_assert(ARRAY_SIZE(cs_dsp_ctl_cache_test_fw_names) >= ARRAY_SIZE(builder));
+
+ /* Create an identical control in each firmware but with different alg id */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ builder[i] = _create_dummy_wmfw(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder[i]);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(builder[i],
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder[i], &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /*
+ * For each firmware create random content in the register backing
+ * the control. Then download, start, stop and power-down.
+ */
+ for (i = 0; i < ARRAY_SIZE(builder); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder[i]);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(dsp, wmfw,
+ cs_dsp_ctl_cache_test_fw_names[i],
+ NULL, NULL,
+ cs_dsp_ctl_cache_test_fw_names[i]),
+ 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+ }
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (cs_dsp_ctl_cache_test_algs[0].id == walkctl->alg_region.alg)
+ ctl[0] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[1].id == walkctl->alg_region.alg)
+ ctl[1] = walkctl;
+ else if (cs_dsp_ctl_cache_test_algs[2].id == walkctl->alg_region.alg)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls at the same position in different memories.
+ * The control cache should be initialized with content from the
+ * correct memory region.
+ */
+static void cs_dsp_ctl_cache_init_multiple_mems(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for memory region */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
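+ /* Only add a ZM control if this DSP has a ZM region */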
+ if (cs_dsp_mock_has_zm(priv)) {
+ def.mem_type = WMFW_ADSP2_ZM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_YM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_XM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ alg_base_words = _get_alg_mem_base_words(test, 0, WMFW_ADSP2_ZM);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_ZM);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+ }
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 2 or 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list),
+ cs_dsp_mock_has_zm(priv) ? 3 : 2);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.type == WMFW_ADSP2_YM)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_XM)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.type == WMFW_ADSP2_ZM)
+ ctl[2] = walkctl;
+ }
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ if (cs_dsp_mock_has_zm(priv)) {
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+ }
+}
+
+/*
+ * Firmware with controls at the same position in different algorithms
+ * The control cache should be initialized with content from the
+ * memory of the algorithm it points to.
+ */
+static void cs_dsp_ctl_cache_init_multiple_algs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create controls identical except for algorithm */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[i].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ /* Create random content in the registers backing each control */
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ alg_base_words = _get_alg_mem_base_words(test, i, def.mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ reg += (alg_base_words + def.offset_dsp_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals[i], def.length_bytes);
+ }
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[0].id)
+ ctl[0] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[1].id)
+ ctl[1] = walkctl;
+ if (walkctl->alg_region.alg == cs_dsp_ctl_cache_test_algs[2].id)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Firmware with controls in the same algorithm and memory but at
+ * different offsets.
+ * The control cache should be initialized with content from the
+ * correct offset.
+ * Only for wmfw format V2 and later. V1 only supports one control per
+ * memory per algorithm.
+ */
+static void cs_dsp_ctl_cache_init_multiple_offsets(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ unsigned int reg, alg_base_words, alg_base_reg;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl[3];
+ struct firmware *wmfw;
+ u32 *reg_vals[3], *readback;
+ int i;
+
+ static_assert(ARRAY_SIZE(ctl) == ARRAY_SIZE(reg_vals));
+ static_assert(ARRAY_SIZE(reg_vals) <= ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+
+ for (i = 0; i < ARRAY_SIZE(reg_vals); i++) {
+ reg_vals[i] = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals[i]);
+ get_random_bytes(reg_vals[i], def.length_bytes);
+ }
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create controls identical except for offset */
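+ /* The 8-byte (2 word) controls at word offsets 0, 5 and 8 do not overlap */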
+ def.length_bytes = 8;
+ def.offset_dsp_words = 0;
+ def.shortname = "CtlA";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 5;
+ def.shortname = "CtlB";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ def.offset_dsp_words = 8;
+ def.shortname = "CtlC";
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create random content in the registers backing each control */
+ alg_base_words = _get_alg_mem_base_words(test, 0, def.mem_type);
+ alg_base_reg = cs_dsp_mock_base_addr_for_mem(priv, def.mem_type);
+ alg_base_reg += alg_base_words * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+
+ reg = alg_base_reg;
+ regmap_raw_write(dsp->regmap, reg, reg_vals[0], def.length_bytes);
+ reg = alg_base_reg + (5 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[1], def.length_bytes);
+ reg = alg_base_reg + (8 * cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv));
+ regmap_raw_write(dsp->regmap, reg, reg_vals[2], def.length_bytes);
+
+ /* Download, run, stop and power-down the firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* There should now be 3 controls */
+ KUNIT_ASSERT_EQ(test, list_count_nodes(&dsp->ctl_list), 3);
+
+ /*
+ * There's no requirement for the control list to be in any
+ * particular order, so don't assume the order.
+ */
+ for (i = 0; i < ARRAY_SIZE(ctl); i++)
+ ctl[i] = NULL;
+
+ list_for_each_entry(walkctl, &dsp->ctl_list, list) {
+ if (walkctl->offset == 0)
+ ctl[0] = walkctl;
+ if (walkctl->offset == 5)
+ ctl[1] = walkctl;
+ if (walkctl->offset == 8)
+ ctl[2] = walkctl;
+ }
+
+ KUNIT_ASSERT_NOT_NULL(test, ctl[0]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[1]);
+ KUNIT_ASSERT_NOT_NULL(test, ctl[2]);
+
+ /*
+ * The data should have been populated into the control cache
+ * so should be readable through the control.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[0], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[0], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[1], 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[1], def.length_bytes);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl[2], 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals[2], def.length_bytes);
+}
+
+/*
+ * Read from a cached control before the firmware is started.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been stopped.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the DSP has been powered-up and
+ * then powered-down without running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control after the firmware has been run and
+ * stopped, then the DSP has been powered-down.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control when a different firmware is currently
+ * running.
+ * Should return the data in the cache.
+ */
+static void cs_dsp_ctl_cache_read_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Power-up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the data from the control cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with non-zero flags while the firmware is
+ * running.
+ * Should return the data from the cache, not from the registers.
+ */
+static void cs_dsp_ctl_cache_read_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
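+ /* new_reg_vals is left zero-filled to distinguish it from the random initial values */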
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create data in the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(init_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Change the values in the registers backing the control then drop
+ * them from the regmap cache. This allows checking that the control
+ * read is returning values from the control cache and not accessing
+ * the registers.
+ */
+ KUNIT_ASSERT_EQ(test,
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes),
+ 0);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Control should read back the original data from its cache */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Control should read back from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a cached control with flags == 0 while the firmware is
+ * running.
+ * Should behave as volatile and read from the registers.
+ * (This is for backwards compatibility with old firmware versions.)
+ */
+static void cs_dsp_ctl_cache_read_running_zero_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_reg_vals, *new_reg_vals, *readback;
+
+ init_reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_reg_vals);
+
+ new_reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
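+ /* Zero flags come from legacy firmware and behave as volatile while running */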
+ def.flags = 0;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware running */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Change the values in the registers backing the control */
+ get_random_bytes(new_reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, new_reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+
+ /* Stop and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Change the values in the registers backing the control */
+ regmap_raw_write(dsp->regmap, reg, init_reg_vals, param->len_bytes);
+
+ /* Control should read back from the cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is running.
+ * This should be a writethrough operation, writing to the cache and
+ * the registers.
+ */
+static void cs_dsp_ctl_cache_writethrough(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
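+ /* Zero-fill the registers backing the control as the initial cache content */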
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control, it should be written to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
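+ /* A return value of 1 indicates that the control value changed */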
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is running.
+ * The control write should return 0 to indicate that the content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_writethrough_unchanged(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * If the control is write-only the cache will have been zero-initialized
+ * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+ * Write the same data to the control, cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write unchanged data to a cached control while the firmware is not started.
+ * The control write should return 0 to indicate that the cache content
+ * didn't change.
+ */
+static void cs_dsp_ctl_cache_write_unchanged_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /*
+ * If the control is write-only the cache will have been zero-initialized
+ * so the first write will always indicate a change.
+ */
+ if (def.flags && !(def.flags & WMFW_CTL_FLAG_READABLE)) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ param->len_bytes),
+ 1);
+ }
+
+ /*
+ * Write the same data to the control, cs_dsp_coeff_lock_and_write_ctrl()
+ * should return 0 to indicate the content didn't change.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is loaded but not
+ * started.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control, it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started and stopped.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * then the DSP powered-down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after the firmware has been loaded,
+ * started, stopped, and then the DSP powered-down.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power-down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+ * It should be possible to write new data to the control from
+ * the first firmware. But this should not be written to the
+ * registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently running firmware.
+ * This should write to the cache only.
+ */
+static void cs_dsp_ctl_cache_write_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP then power-down */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-up with a different firmware and run it */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
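+ /* Queue cs_dsp_stop() to run automatically when the test exits */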
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Control from unloaded firmware should be disabled */
+ KUNIT_EXPECT_FALSE(test, ctl->enabled);
+
+ /* Drop expected writes and the regmap cache should be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /*
+ * It should be possible to write new data to the control from
+ * the first firmware. But this should not be written to the
+ * registers.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ /* Registers should not have been written so regmap cache should still be clean */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control before running the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_before_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control while the firmware is running.
+ * The value written should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_while_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *ctl_vals, *readback;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ ctl_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP and start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Stop firmware and zero the registers backing the control */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, init_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control after stopping the firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control when the firmware is next run.
+ */
+static void cs_dsp_ctl_cache_sync_write_after_stop(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Write new data to the control; it should not be written to the registers */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Write to a cached control that is not in the currently loaded firmware.
+ * The value written to the cache should be synced out to the registers
+ * backing the control the next time the firmware containing the
+ * control is run.
+ */
+static void cs_dsp_ctl_cache_sync_write_not_current_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Get the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Write new data to the control; it should not be written to the registers */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMNEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Power-down DSP then power-up with the original firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be synced out to the registers
+ * backing the control every time the firmware containing the control
+ * is run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_every_run(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and reset the registers */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Start the firmware again and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained if the same
+ * firmware is downloaded again. It should be synced out to the
+ * registers backing the control after the firmware containing the
+ * control is downloaded again and run.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_reload(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the firmware again; the cache content should not change */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
+/*
+ * The value in the control cache should be retained after a different
+ * firmware is downloaded.
+ * When the firmware containing the control is downloaded and run
+ * the value in the control cache should be synced out to the registers
+ * backing the control.
+ */
+static void cs_dsp_ctl_cache_sync_reapply_after_fw_swap(struct kunit *test)
+{
+ const struct cs_dsp_ctl_cache_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *init_vals, *readback, *ctl_vals;
+
+ init_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, init_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ ctl_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ /* Zero-fill the registers backing the control */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_cache_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP but don't start firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Write new data to the control */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ get_random_bytes(ctl_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, param->len_bytes),
+ 1);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Stop the firmware and power-down the DSP */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download and run a different firmware */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_power_down(dsp);
+
+ /* Reset the registers */
+ regmap_raw_write(dsp->regmap, reg, init_vals, param->len_bytes);
+
+ /* Download the original firmware again */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
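+ /* The control should still be marked as set from the earlier write */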
+ KUNIT_EXPECT_TRUE(test, ctl->set);
+
+ /* Start the firmware and the cached data should be written to registers */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+ KUNIT_EXPECT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+
+ /* Control should read back the new data from the control cache */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, param->len_bytes);
+}
+
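+/*
+ * Common suite initialization: create a dummy device and mock regmap,
+ * build a dummy XM header blob and a wmfw builder, then initialize the
+ * cs_dsp instance for the DSP core type under test.
+ */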
+static int cs_dsp_ctl_cache_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_cache_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_cache_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_cache_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_cache_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_cache_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_cache_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
+
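+/*
+ * Each KUNIT_ARRAY_PARAM() below generates a <name>_gen_params parameter
+ * generator from its table, for use with KUNIT_CASE_PARAM() in the test
+ * case lists below.
+ */
+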
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_cache_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_cache_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control, except flags==0
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_readable_nonzero_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_nonzero_flags,
+ all_pop_nonvol_readable_nonzero_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile write-only control of varying lengths
+ */
+static const struct cs_dsp_ctl_cache_test_param all_pop_nonvol_write_only_length_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_write_only_length,
+ all_pop_nonvol_write_only_length_cases,
+ cs_dsp_ctl_all_param_desc);
+
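+/* Test case lists, one for each wmfw format version under test */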
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v1[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v2[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running_zero_flags,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_cache_test_cases_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_v2_cache_alloc),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init, all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_init_write_only,
+ all_pop_nonvol_write_only_length_gen_params),
+
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fw_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_fwalgid_same_controls),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_mems),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_algs),
+ KUNIT_CASE(cs_dsp_ctl_cache_init_multiple_offsets),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_started,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_stopped_powered_down,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_loaded_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_not_current_running_fw,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_read_running,
+ all_pop_nonvol_readable_nonzero_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough, all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_writethrough_unchanged,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_unchanged_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_started,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_stopped_powered_down,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_loaded_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_write_not_current_running_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_before_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_while_running,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_after_stop,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_write_not_current_fw,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_every_run,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_reload,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_cache_sync_reapply_after_fw_swap,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_halo = {
+ .name = "cs_dsp_ctl_cache_wmfwV3_halo",
+ .init = cs_dsp_ctl_cache_test_halo_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_cache_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_cache_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_cache_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_ctl_cache_test_halo,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_cache_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
new file mode 100644
index 000000000000..cb90964740ea
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
@@ -0,0 +1,1851 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_parse_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offset;
+ unsigned int length;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_parse_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_size_words = 8,
+ .ym_size_words = 8,
+ .zm_size_words = 8,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_size_words = 16,
+ .ym_size_words = 16,
+ .zm_size_words = 16,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+/* Algorithm info block without controls should load */
+static void cs_dsp_ctl_parse_no_coeffs(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/*
+ * V1 controls do not have names, so the name field in the coefficient
+ * entry should be ignored.
+ */
+static void cs_dsp_ctl_parse_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Dummy";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names, so the name field in the coefficient
+ * entry should be ignored. Test with a zero-length name string.
+ */
+static void cs_dsp_ctl_parse_empty_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "\0";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * V1 controls do not have names, so the name field in the coefficient
+ * entry should be ignored. Test with a maximum length name string.
+ */
+static void cs_dsp_ctl_parse_max_v1_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *name;
+
+ name = kunit_kzalloc(test, 256, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
+ memset(name, 'A', 255);
+ def.fullname = name;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Short name from coeff descriptor should be used as control name. */
+static void cs_dsp_ctl_parse_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Short name from coeff descriptor should be used as control name.
+ * Test with a short name that is a single character.
+ */
+static void cs_dsp_ctl_parse_min_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.shortname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 1);
+ KUNIT_EXPECT_EQ(test, ctl->subname[0], 'Q');
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Short name from coeff descriptor should be used as control name.
+ * Test with a maximum length name.
+ */
+static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char *name;
+ struct firmware *wmfw;
+
+ name = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
+ memset(name, 'A', 255);
+
+ def.shortname = name;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 255);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, name, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a 1-character full name.
+ */
+static void cs_dsp_ctl_parse_with_min_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.fullname = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a maximum length full name.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *fullname;
+
+ fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
+ memset(fullname, 'A', 255);
+ def.fullname = fullname;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Description from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a 1-character description.
+ */
+static void cs_dsp_ctl_parse_with_min_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.description = "Q";
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Description from coeff descriptor should be ignored. It is a
+ * variable-length field, so it affects the position of subsequent fields.
+ * Test with a maximum length description.
+ */
+static void cs_dsp_ctl_parse_with_max_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *description;
+
+ description = kunit_kmalloc(test, 65535, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
+ memset(description, 'A', 65535);
+ def.description = description;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Full name and description from coeff descriptor are variable-length
+ * fields, so they affect the position of subsequent fields.
+ * Test with a maximum length full name and description.
+ */
+static void cs_dsp_ctl_parse_with_max_fullname_and_description(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ char *fullname, *description;
+
+ fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
+ memset(fullname, 'A', 255);
+ def.fullname = fullname;
+
+ description = kunit_kmalloc(test, 65535, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
+ memset(description, 'A', 65535);
+ def.description = description;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(def.shortname));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+static const char * const cs_dsp_ctl_alignment_test_names[] = {
+ "1", "12", "123", "1234", "12345", "123456", "1234567",
+ "12345678", "123456789", "123456789A", "123456789AB",
+ "123456789ABC", "123456789ABCD", "123456789ABCDE",
+ "123456789ABCDEF",
+};
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of short name.
+ */
+static void cs_dsp_ctl_shortname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ def.shortname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_alignment_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, i + 1);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_alignment_test_names[i],
+ ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of full name.
+ */
+static void cs_dsp_ctl_fullname_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.fullname = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+/*
+ * Variable-length string fields are padded to a multiple of 4 bytes.
+ * Test this with various lengths of description.
+ */
+static void cs_dsp_ctl_description_alignment(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ char ctl_name[4];
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ /*
+ * Create a unique control name of 3 characters so that
+ * the shortname field is exactly 4 bytes long including
+ * the length byte.
+ */
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+ KUNIT_ASSERT_EQ(test, strlen(ctl_name), 3);
+ def.shortname = ctl_name;
+
+ def.description = cs_dsp_ctl_alignment_test_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_alignment_test_names); i++) {
+ snprintf(ctl_name, sizeof(ctl_name), "%03d", i);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, ctl_name, def.mem_type,
+ cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 3);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, ctl_name, ctl->subname_len);
+ /* Test fields that are parsed after the variable-length fields */
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+ }
+}
+
+static const char * const cs_dsp_get_ctl_test_names[] = {
+ "Up", "Down", "Switch", "Mute",
+ "Left Up", "Left Down", "Right Up", "Right Down",
+ "Left Mute", "Right Mute",
+ "_trunc_1", "_trunc_2", " trunc",
+};
+
+/* Test using cs_dsp_get_ctl() to lookup various controls. */
+static void cs_dsp_get_ctl_test(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_get_ctl_test_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, strlen(cs_dsp_get_ctl_test_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_get_ctl_test_names[i],
+ ctl->subname_len);
+ KUNIT_EXPECT_EQ(test, ctl->offset, i);
+ }
+}
+
+/*
+ * cs_dsp_get_ctl() searches for the control in the currently loaded
+ * firmware, so create identical controls in multiple firmware files and
+ * test that the correct one is found.
+ */
+static void cs_dsp_get_ctl_test_multiple_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ def.shortname = "_A_CONTROL";
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control of the same name */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* A lookup should return the control for the current firmware */
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 2);
+
+ /* Re-load the 'misc' firmware and a lookup should return its control */
+ cs_dsp_power_down(priv->dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, def.shortname,
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, 1);
+}
+
+/* Test that the value of the memory type field is parsed correctly. */
+static void cs_dsp_ctl_parse_memory_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ /* kunit_skip() marks the test skipped forever, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the algorithm id from the parent alg-info block is
+ * correctly stored in the cs_dsp_coeff_ctl.
+ */
+static void cs_dsp_ctl_parse_alg_id(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, def.mem_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/*
+ * Test that the value of the (alg id, memory type) tuple is parsed correctly.
+ * The alg id is parsed from the alg-info block, but the memory type is
+ * parsed from the coefficient info descriptor.
+ */
+static void cs_dsp_ctl_parse_alg_mem(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ /* kunit_skip() marks the test skipped forever, so just return */
+ if ((param->mem_type == WMFW_ADSP2_ZM) && !cs_dsp_mock_has_zm(priv))
+ return;
+
+ def.mem_type = param->mem_type;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ param->alg_id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.alg, param->alg_id);
+ KUNIT_EXPECT_EQ(test, ctl->alg_region.type, param->mem_type);
+}
+
+/* Test that the value of the offset field is parsed correctly. */
+static void cs_dsp_ctl_parse_offset(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.offset_dsp_words = param->offset;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, param->offset);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the length field is parsed correctly. */
+static void cs_dsp_ctl_parse_length(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.length_bytes = param->length;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->offset, def.offset_dsp_words);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->len, param->length);
+}
+
+/* Test that the value of the control type field is parsed correctly. */
+static void cs_dsp_ctl_parse_ctl_type(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, param->ctl_type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that the value of the flags field is parsed correctly. */
+static void cs_dsp_ctl_parse_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->type, def.type);
+ KUNIT_EXPECT_EQ(test, ctl->flags, param->flags);
+ KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
+}
+
+/* Test that invalid combinations of (control type, flags) are rejected. */
+static void cs_dsp_ctl_illegal_type_flags(struct kunit *test)
+{
+ const struct cs_dsp_ctl_parse_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct firmware *wmfw;
+ u32 reg_val;
+
+ /*
+ * Non-volatile controls will be read to initialize the cache,
+ * so the regmap cache must contain something to read.
+ */
+ reg_val = 0xf11100;
+ regmap_raw_write(priv->dsp->regmap,
+ cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM),
+ &reg_val, sizeof(reg_val));
+
+ def.type = param->ctl_type;
+ def.flags = param->flags;
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_LT(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+}
+
+/* Test that the correct firmware name is entered in the cs_dsp_coeff_ctl. */
+static void cs_dsp_ctl_parse_fw_name(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *walkctl, *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ def.offset_dsp_words = 1;
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with a control */
+ def.offset_dsp_words = 2;
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2", NULL, NULL, "mbc/vss"), 0);
+
+ /* Both controls should be in the list (order not guaranteed) */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = NULL;
+ ctl2 = NULL;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ if (strcmp(walkctl->fw_name, "misc") == 0)
+ ctl1 = walkctl;
+ else if (strcmp(walkctl->fw_name, "mbc/vss") == 0)
+ ctl2 = walkctl;
+ }
+
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, 1);
+ KUNIT_EXPECT_EQ(test, ctl2->offset, 2);
+}
+
+/* Controls are unique if the algorithm ID is different */
+static void cs_dsp_ctl_alg_id_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ /* Create an algorithm containing the control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Create a different algorithm containing an identical control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[1].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if the memory region is different */
+static void cs_dsp_ctl_mem_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct firmware *wmfw;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ /* Create control in XM */
+ def.mem_type = WMFW_ADSP2_XM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ /* Create control in YM */
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_NE(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STREQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/* Controls are unique if they are in different firmware */
+static void cs_dsp_ctl_fw_uniqueness(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl1, *ctl2;
+ struct cs_dsp_mock_wmfw_builder *builder2;
+ struct firmware *wmfw;
+
+ /* Create a second mock wmfw builder */
+ builder2 = cs_dsp_mock_wmfw_init(priv,
+ cs_dsp_mock_wmfw_format_version(local->wmfw_builder));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder2);
+ cs_dsp_mock_wmfw_add_data_block(builder2,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Load a 'misc' firmware with a control */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Load a 'mbc/vss' firmware with the same control */
+ cs_dsp_mock_wmfw_start_alg_info_block(builder2,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(builder2, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(builder2);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw2",
+ NULL, NULL, "mbc/vss"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* Both controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list), 2);
+ ctl1 = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ ctl2 = list_next_entry(ctl1, list);
+ KUNIT_EXPECT_NOT_NULL(test, ctl1);
+ KUNIT_EXPECT_NOT_NULL(test, ctl2);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.alg, ctl2->alg_region.alg);
+ KUNIT_EXPECT_EQ(test, ctl1->alg_region.type, ctl2->alg_region.type);
+ KUNIT_EXPECT_EQ(test, ctl1->offset, ctl2->offset);
+ KUNIT_EXPECT_EQ(test, ctl1->type, ctl2->type);
+ KUNIT_EXPECT_EQ(test, ctl1->flags, ctl2->flags);
+ KUNIT_EXPECT_EQ(test, ctl1->len, ctl2->len);
+ KUNIT_EXPECT_STRNEQ(test, ctl1->fw_name, ctl2->fw_name);
+ KUNIT_EXPECT_EQ(test, ctl1->subname_len, ctl2->subname_len);
+ if (ctl1->subname_len)
+ KUNIT_EXPECT_MEMEQ(test, ctl1->subname, ctl2->subname, ctl1->subname_len);
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded, the controls are not added again.
+ * This creates multiple algorithms with one control each, which will
+ * work on both V1 format and >=V2 format controls.
+ */
+static void cs_dsp_ctl_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_ctl_parse_test_algs)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ /* Create some algorithms with a control */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_parse_test_algs); i++) {
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[i].id,
+ "dummyalg", NULL);
+ def.mem_type = WMFW_ADSP2_YM;
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
+/*
+ * Controls from a wmfw are only added to the list once. If the same
+ * wmfw is reloaded, the controls are not added again.
+ * This tests >=V2 firmware that can have multiple named controls in
+ * the same algorithm.
+ */
+static void cs_dsp_ctl_v2_squash_reloaded_controls(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctls[ARRAY_SIZE(cs_dsp_get_ctl_test_names)];
+ struct cs_dsp_coeff_ctl *walkctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ /* Create some controls */
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_get_ctl_test_names); i++) {
+ def.shortname = cs_dsp_get_ctl_test_names[i];
+ def.offset_dsp_words = i;
+ if (i & BIT(0))
+ def.mem_type = WMFW_ADSP2_XM;
+ else
+ def.mem_type = WMFW_ADSP2_YM;
+
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* All controls should be in the list */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* Take a copy of the pointers to controls to compare against. */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ ctls[i++] = walkctl;
+ }
+
+ /* Load the wmfw again */
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+ cs_dsp_power_down(priv->dsp);
+
+ /* The number of controls should be the same */
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&priv->dsp->ctl_list),
+ ARRAY_SIZE(cs_dsp_get_ctl_test_names));
+
+ /* And they should be the same objects */
+ i = 0;
+ list_for_each_entry(walkctl, &priv->dsp->ctl_list, list) {
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(ctls));
+ KUNIT_ASSERT_PTR_EQ(test, walkctl, ctls[i++]);
+ }
+}
+
+static const char * const cs_dsp_ctl_v2_compare_len_names[] = {
+ "LEFT",
+ "LEFT_",
+ "LEFT_SPK",
+ "LEFT_SPK_V",
+ "LEFT_SPK_VOL",
+ "LEFT_SPK_MUTE",
+ "LEFT_SPK_1",
+ "LEFT_X",
+ "LEFT2",
+};
+
+/*
+ * When comparing shortnames the full length of both strings is
+ * considered, not only the characters of the shorter string,
+ * so that "LEFT" is not treated as the same name as "LEFT2".
+ * (A sketch of the required comparison follows this test.)
+ * This is specifically to test for the bug that was fixed by commit:
+ * 7ac1102b227b ("firmware: cs_dsp: Fix new control name check")
+ */
+static void cs_dsp_ctl_v2_compare_len(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ int i;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_parse_test_algs[0].id,
+ "dummyalg", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ def.shortname = cs_dsp_ctl_v2_compare_len_names[i];
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ }
+
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(priv->dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_v2_compare_len_names); i++) {
+ mutex_lock(&priv->dsp->pwr_lock);
+ ctl = cs_dsp_get_ctl(priv->dsp, cs_dsp_ctl_v2_compare_len_names[i],
+ def.mem_type, cs_dsp_ctl_parse_test_algs[0].id);
+ mutex_unlock(&priv->dsp->pwr_lock);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len,
+ strlen(cs_dsp_ctl_v2_compare_len_names[i]));
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, cs_dsp_ctl_v2_compare_len_names[i],
+ ctl->subname_len);
+ }
+}
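+
+/*
+ * Purely illustrative sketch of the comparison property checked above
+ * (not code from cs_dsp itself): two subnames only match when both the
+ * length and the bytes are equal, so "LEFT" does not match "LEFT2":
+ *
+ *	names_match = (a_len == b_len) && !memcmp(a, b, a_len);
+ */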
+
+static int cs_dsp_ctl_parse_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_parse_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_parse_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header blob to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_parse_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_parse_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_parse_test_adsp2_16bit_init(test, 2);
+}
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_mem_type_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_XM },
+ { .mem_type = WMFW_ADSP2_YM },
+ { .mem_type = WMFW_ADSP2_ZM },
+};
+
+static void cs_dsp_ctl_mem_type_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s",
+ cs_dsp_mem_region_name(param->mem_type));
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_mem_type,
+ cs_dsp_ctl_mem_type_param_cases,
+ cs_dsp_ctl_mem_type_desc);
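+
+/*
+ * KUNIT_ARRAY_PARAM() above defines a cs_dsp_ctl_mem_type_gen_params()
+ * generator; the test case tables later in this file hook it up with:
+ *
+ *	KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ *
+ * which runs that case once per entry in cs_dsp_ctl_mem_type_param_cases[].
+ */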
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_alg_id_param_cases[] = {
+ { .alg_id = 0xb },
+ { .alg_id = 0xfafa },
+ { .alg_id = 0x9f1234 },
+ { .alg_id = 0xff00ff },
+};
+
+static void cs_dsp_ctl_alg_id_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg_id:%#x", param->alg_id);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_alg_id,
+ cs_dsp_ctl_alg_id_param_cases,
+ cs_dsp_ctl_alg_id_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_offset_param_cases[] = {
+ { .offset = 0x0 },
+ { .offset = 0x1 },
+ { .offset = 0x2 },
+ { .offset = 0x3 },
+ { .offset = 0x4 },
+ { .offset = 0x5 },
+ { .offset = 0x6 },
+ { .offset = 0x7 },
+ { .offset = 0xe0 },
+ { .offset = 0xf1 },
+ { .offset = 0xfffe },
+ { .offset = 0xffff },
+};
+
+static void cs_dsp_ctl_offset_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "offset:%#x", param->offset);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_offset,
+ cs_dsp_ctl_offset_param_cases,
+ cs_dsp_ctl_offset_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_length_param_cases[] = {
+ { .length = 0x4 },
+ { .length = 0x8 },
+ { .length = 0x18 },
+ { .length = 0xf000 },
+};
+
+static void cs_dsp_ctl_length_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "length:%#x", param->length);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_length,
+ cs_dsp_ctl_length_param_cases,
+ cs_dsp_ctl_length_desc);
+
+/* Note: some control types mandate specific flags settings */
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_type_param_cases[] = {
+ { .ctl_type = WMFW_CTL_TYPE_BYTES,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_SYS },
+};
+
+static void cs_dsp_ctl_type_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "ctl_type:%#x flags:%#x",
+ param->ctl_type, param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_type,
+ cs_dsp_ctl_type_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_flags_param_cases[] = {
+ { .flags = 0 },
+ { .flags = WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+static void cs_dsp_ctl_flags_desc(const struct cs_dsp_ctl_parse_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "flags:%#x", param->flags);
+}
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_flags,
+ cs_dsp_ctl_flags_param_cases,
+ cs_dsp_ctl_flags_desc);
+
+static const struct cs_dsp_ctl_parse_test_param cs_dsp_ctl_illegal_type_flags_param_cases[] = {
+ /* ACKED control must be volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_ACKED,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* HOSTEVENT must be system + volatile + read + write */
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOSTEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /* FWEVENT rules same as HOSTEVENT */
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_FWEVENT,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+
+ /*
+ * HOSTBUFFER must be system + volatile + readable or
+ * system + volatile + readable + writeable
+ */
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = 0 },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE},
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER, .flags = WMFW_CTL_FLAG_SYS },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE },
+ { .ctl_type = WMFW_CTL_TYPE_HOST_BUFFER,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE },
+};
+
+KUNIT_ARRAY_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_param_cases,
+ cs_dsp_ctl_type_flags_desc);
+
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v1[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_empty_v1_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_v1_name),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_parse_test_cases_v2_v3[] = {
+ KUNIT_CASE(cs_dsp_ctl_parse_no_coeffs),
+ KUNIT_CASE(cs_dsp_ctl_parse_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_min_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_max_short_name),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_min_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_description),
+ KUNIT_CASE(cs_dsp_ctl_parse_with_max_fullname_and_description),
+ KUNIT_CASE(cs_dsp_ctl_shortname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_fullname_alignment),
+ KUNIT_CASE(cs_dsp_ctl_description_alignment),
+ KUNIT_CASE(cs_dsp_get_ctl_test),
+ KUNIT_CASE(cs_dsp_get_ctl_test_multiple_wmfw),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_memory_type, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_id, cs_dsp_ctl_alg_id_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_alg_mem, cs_dsp_ctl_mem_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_offset, cs_dsp_ctl_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_length, cs_dsp_ctl_length_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_ctl_type, cs_dsp_ctl_type_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_parse_flags, cs_dsp_ctl_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_illegal_type_flags,
+ cs_dsp_ctl_illegal_type_flags_gen_params),
+ KUNIT_CASE(cs_dsp_ctl_parse_fw_name),
+
+ KUNIT_CASE(cs_dsp_ctl_alg_id_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_mem_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_fw_uniqueness),
+ KUNIT_CASE(cs_dsp_ctl_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_squash_reloaded_controls),
+ KUNIT_CASE(cs_dsp_ctl_v2_compare_len),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_halo = {
+ .name = "cs_dsp_ctl_parse_wmfwV3_halo",
+ .init = cs_dsp_ctl_parse_test_halo_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_parse_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_parse_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_parse_test_cases_v2_v3,
+};
+
+kunit_test_suites(&cs_dsp_ctl_parse_test_halo,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_parse_test_adsp2_16bit_wmfw2);
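+
+/*
+ * These suites run under the standard KUnit tooling. For example, with a
+ * kernel configuration that enables the cs_dsp KUnit tests, something like
+ *
+ *	./tools/testing/kunit/kunit.py run 'cs_dsp_ctl_parse_*'
+ *
+ * runs every suite defined above (the glob is matched against suite names).
+ */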
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
new file mode 100644
index 000000000000..bda00a95d4f9
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_rw.c
@@ -0,0 +1,2669 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_stop_wrapper, cs_dsp_stop, struct cs_dsp *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_ctl_rw_test_param {
+ int mem_type;
+ int alg_id;
+ unsigned int offs_words;
+ unsigned int len_bytes;
+ u16 ctl_type;
+ u16 flags;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_ctl_rw_test_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_base_words = 60,
+ .xm_size_words = 1000,
+ .ym_base_words = 0,
+ .ym_size_words = 1000,
+ .zm_base_words = 0,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0xb,
+ .ver = 0x100001,
+ .xm_base_words = 1060,
+ .xm_size_words = 1000,
+ .ym_base_words = 1000,
+ .ym_size_words = 1000,
+ .zm_base_words = 1000,
+ .zm_size_words = 1000,
+ },
+ {
+ .id = 0x9f1234,
+ .ver = 0x100500,
+ .xm_base_words = 2060,
+ .xm_size_words = 32,
+ .ym_base_words = 2000,
+ .ym_size_words = 32,
+ .zm_base_words = 2000,
+ .zm_size_words = 32,
+ },
+ {
+ .id = 0xff00ff,
+ .ver = 0x300113,
+ .xm_base_words = 2100,
+ .xm_size_words = 32,
+ .ym_base_words = 2032,
+ .ym_size_words = 32,
+ .zm_base_words = 2032,
+ .zm_size_words = 32,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ .length_bytes = 4,
+};
+
+static int _find_alg_entry(struct kunit *test, unsigned int alg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_ctl_rw_test_algs); ++i) {
+ if (cs_dsp_ctl_rw_test_algs[i].id == alg_id)
+ break;
+ }
+
+ KUNIT_ASSERT_LT(test, i, ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+
+ return i;
+}
+
+static int _get_alg_mem_base_words(struct kunit *test, int alg_index, int mem_type)
+{
+ switch (mem_type) {
+ case WMFW_ADSP2_XM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].xm_base_words;
+ case WMFW_ADSP2_YM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].ym_base_words;
+ case WMFW_ADSP2_ZM:
+ return cs_dsp_ctl_rw_test_algs[alg_index].zm_base_words;
+ default:
+ KUNIT_FAIL(test, "Bug in test: illegal memory type %d\n", mem_type);
+ return 0;
+ }
+}
+
+static struct cs_dsp_mock_wmfw_builder *_create_dummy_wmfw(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp_mock_wmfw_builder *builder;
+
+ builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, builder);
+
+ /* Init an XM header */
+ cs_dsp_mock_wmfw_add_data_block(builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ return builder;
+}
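+
+/*
+ * The tests below locate the registers backing a control from its DSP
+ * memory position:
+ *
+ *	reg = cs_dsp_mock_base_addr_for_mem(priv, mem_type) +
+ *	      (alg_base_words + offs_words) *
+ *	      cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ *
+ * i.e. the algorithm base plus the control offset (both in DSP words),
+ * scaled by the register stride of one unpacked word.
+ */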
+
+/*
+ * Write to a control while the firmware is running.
+ * This should write to the underlying registers.
+ */
+static void cs_dsp_ctl_write_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Write new data to the control; it should be written to the registers,
+ * and cs_dsp_coeff_lock_and_write_ctrl() should return 1 to indicate
+ * that the control content changed.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, param->len_bytes), 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
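+
+/*
+ * Rough sketch of how a caller might consume the return convention
+ * exercised above (negative on error, 1 when the value changed):
+ *
+ *	ret = cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, buf, len_bytes);
+ *	if (ret < 0)
+ *		return ret;
+ *	if (ret > 0)
+ *		notify_value_changed(ctl);
+ *
+ * where notify_value_changed() is a hypothetical caller-side helper.
+ */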
+
+/*
+ * Read from a volatile control while the firmware is running.
+ * This should return the current state of the underlying registers.
+ */
+static void cs_dsp_ctl_read_volatile_running(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ memset(reg_vals, 0, param->len_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Read the control; it should return the current register content */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+
+ /*
+ * Change the register content and read the control; it should
+ * return the new register content.
+ */
+ get_random_bytes(reg_vals, param->len_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes), 0);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, param->len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, param->len_bytes);
+}
+
+/*
+ * Read from a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Read from a volatile control when a different firmware is currently
+ * running.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Read the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+}
+
+/*
+ * Write to a volatile control before the firmware is started.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_started(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the firmware has stopped.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control after the DSP has been powered down.
+ * This should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_stopped_powered_down(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kzalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Start and stop the firmware then power down */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+ cs_dsp_power_down(dsp);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * loaded into the DSP.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_loaded_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write to a volatile control when a different firmware is currently
+ * running.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_volatile_not_current_running_fw(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ struct cs_dsp_mock_wmfw_builder *builder2 = _create_dummy_wmfw(test);
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ reg_vals = kunit_kmalloc(test, param->len_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some DSP data to be read into the control cache */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, param->len_bytes);
+
+ /* Create control pointing to this data */
+ def.flags = param->flags | WMFW_CTL_FLAG_VOLATILE;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ /* Power-up DSP */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ /* Power-down DSP then power-up with a different firmware */
+ cs_dsp_power_down(dsp);
+ wmfw = cs_dsp_mock_wmfw_get_firmware(builder2);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw2", NULL, NULL, "mbc.vss"), 0);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /* Write the control; it should return an error */
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, param->len_bytes),
+ 0);
+
+ /* Should not have been any writes to registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from an offset into the control data. Should return only the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_read_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
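+
+/*
+ * Worked example of the seek arithmetic used above: for the 48-byte
+ * control, seek_words = 2 skips 2 * sizeof(u32) = 8 bytes, so the read
+ * covers bytes 8..47 and is compared against &reg_vals[2].
+ */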
+
+/*
+ * Read from an offset into the control cache. Should return only the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_read_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, &reg_vals[seek_words], len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a control. Should return
+ * only the requested number of bytes.
+ */
+static void cs_dsp_ctl_read_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read less than the full length of data from a cached control.
+ * Should return only the requested number of bytes.
+ * Same as cs_dsp_ctl_read_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_read_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the read will come from the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Reads are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, len_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, len_bytes);
+ KUNIT_EXPECT_MEMNEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control data. Should only change the
+ * portion of data from the offset position.
+ */
+static void cs_dsp_ctl_write_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
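+ /* Write at each seek offset and check only data after the offset changes */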
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
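+ /* Writing new data should return 1 to indicate the value changed */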
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write to an offset into the control cache. Should only change the
+ * portion of data from the offset position.
+ * Same as cs_dsp_ctl_write_with_seek() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_with_seek(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the write will go to the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ for (seek_words = 1; seek_words < (def.length_bytes / sizeof(u32)); seek_words++) {
+ unsigned int len_bytes = def.length_bytes - (seek_words * sizeof(u32));
+
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ /* Initial portion of readback should be unchanged */
+ KUNIT_EXPECT_MEMEQ(test, readback, reg_vals, seek_words * sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, &readback[seek_words], new_data, len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a control. Should only
+ * change the requested number of bytes.
+ */
+static void cs_dsp_ctl_write_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the register values to the test data */
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_ASSERT_EQ(test, regmap_raw_read(dsp->regmap, reg, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Write less than the full length of data to a cached control.
+ * Should only change the requested number of bytes.
+ * Same as cs_dsp_ctl_write_truncated() except the control is cached
+ * and the firmware is not running.
+ */
+static void cs_dsp_ctl_write_cache_truncated(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals, *readback, *new_data;
+ unsigned int len_bytes;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = 48;
+
+ reg_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ readback = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ new_data = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_data);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ get_random_bytes(reg_vals, def.length_bytes);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start and stop the firmware so the write will go to the cache */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ cs_dsp_stop(dsp);
+
+ /* Writes are only allowed to be a multiple of the DSP word length */
+ for (len_bytes = sizeof(u32); len_bytes < def.length_bytes; len_bytes += sizeof(u32)) {
+ /* Reset the cache to the test data */
+ KUNIT_EXPECT_GE(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+
+ get_random_bytes(new_data, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, new_data, len_bytes),
+ 1);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, new_data, len_bytes);
+ KUNIT_EXPECT_MEMEQ(test,
+ (u8 *)readback + len_bytes,
+ (u8 *)reg_vals + len_bytes,
+ def.length_bytes - len_bytes);
+ }
+}
+
+/*
+ * Read from an offset that is beyond the end of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ seek_words = def.length_bytes / sizeof(u32);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Read more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+}
+
+/*
+ * Read with a seek and length that ends beyond the end of control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_read_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /*
+ * Read full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+}
+
+/*
+ * Write to an offset that is beyond the end of the control data.
+ * Should return an error without touching any registers.
+ */
+static void cs_dsp_ctl_write_with_seek_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+ unsigned int seek_words;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ seek_words = def.length_bytes / sizeof(u32);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, seek_words,
+ reg_vals, def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write more data than the length of the control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_length_overflow(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes + 1),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes + 1),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write with a seek and length that ends beyond the end of control data.
+ * Should return an error.
+ */
+static void cs_dsp_ctl_write_with_seek_and_length_oob(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ /*
+ * Write full control length but at a start offset of 1 so that
+ * offset + length exceeds the length of the control.
+ */
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 1, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Read from a write-only control. This is legal because controls can
+ * always be read. The write-only flag only indicates that it is not
+ * useful to populate the cache from the DSP memory.
+ */
+static void cs_dsp_ctl_read_from_writeonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *ctl_vals, *readback;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ ctl_vals = kunit_kmalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctl_vals);
+
+ readback = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Write some test data to the control */
+ get_random_bytes(ctl_vals, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, ctl_vals, def.length_bytes),
+ 1);
+
+ /* Read back the data */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback, def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the read from the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ memset(readback, 0, def.length_bytes);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_coeff_lock_and_read_ctrl(ctl, 0, readback,
+ def.length_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, ctl_vals, def.length_bytes);
+ }
+}
+
+/*
+ * Write to a read-only control.
+ * This should return an error without writing registers.
+ */
+static void cs_dsp_ctl_write_to_readonly(struct kunit *test)
+{
+ const struct cs_dsp_ctl_rw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct cs_dsp *dsp = priv->dsp;
+ struct cs_dsp_mock_coeff_def def = mock_coeff_template;
+ int alg_idx = _find_alg_entry(test, param->alg_id);
+ unsigned int reg, alg_base_words;
+ struct cs_dsp_coeff_ctl *ctl;
+ struct firmware *wmfw;
+ u32 *reg_vals;
+
+ /* Sanity check parameters */
+ KUNIT_ASSERT_FALSE(test, param->flags & WMFW_CTL_FLAG_WRITEABLE);
+ KUNIT_ASSERT_TRUE(test, param->flags & WMFW_CTL_FLAG_READABLE);
+
+ def.flags = param->flags;
+ def.mem_type = param->mem_type;
+ def.offset_dsp_words = param->offs_words;
+ def.length_bytes = param->len_bytes;
+
+ reg_vals = kunit_kzalloc(test, def.length_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, reg_vals);
+
+ /* Create some initial register content */
+ alg_base_words = _get_alg_mem_base_words(test, alg_idx, param->mem_type);
+ reg = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg += (alg_base_words + param->offs_words) *
+ cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv);
+ regmap_raw_write(dsp->regmap, reg, reg_vals, def.length_bytes);
+
+ /* Create control pointing to this data */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_ctl_rw_test_algs[alg_idx].id,
+ "dummyalg", NULL);
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &def);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_ASSERT_EQ(test, cs_dsp_power_up(dsp, wmfw, "mock_fw", NULL, NULL, "misc"), 0);
+
+ ctl = list_first_entry_or_null(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+
+ /* Start the firmware and add an action to stop it during cleanup */
+ KUNIT_ASSERT_EQ(test, cs_dsp_run(dsp), 0);
+ KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, _cs_dsp_stop_wrapper, dsp), 0);
+
+ /* Drop the expected writes; the regmap cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ cs_dsp_mock_regmap_drop_bytes(priv, reg, param->len_bytes);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals, def.length_bytes),
+ 0);
+
+ if (!(def.flags & WMFW_CTL_FLAG_VOLATILE)) {
+ /* Stop firmware and repeat the write to the cache */
+ kunit_release_action(test, _cs_dsp_stop_wrapper, dsp);
+ KUNIT_ASSERT_FALSE(test, dsp->running);
+
+ get_random_bytes(reg_vals, def.length_bytes);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_coeff_lock_and_write_ctrl(ctl, 0, reg_vals,
+ def.length_bytes),
+ 0);
+ }
+
+ /* Check that it didn't write any registers */
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
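+/*
+ * Common init for a test case: create a dummy struct device and mock
+ * regmap, build a mock XM header and wmfw builder, then initialize the
+ * cs_dsp instance for the DSP type under test.
+ */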
+static int cs_dsp_ctl_rw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least one algorithm, so create
+ * a dummy one that tests can use and extract it to a data blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_ctl_rw_test_algs,
+ ARRAY_SIZE(cs_dsp_ctl_rw_test_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ /* Create wmfw builder */
+ local->wmfw_builder = _create_dummy_wmfw(test);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_ctl_rw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
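+ /* Halo Core firmware uses wmfw format version 3 */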
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_ctl_rw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_ctl_rw_test_adsp2_16bit_init(test, 2);
+}
+
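+/* Generate a KUnit description string for a test parameter combination */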
+static void cs_dsp_ctl_all_param_desc(const struct cs_dsp_ctl_rw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "alg:%#x %s@%u len:%u flags:%#x",
+ param->alg_id, cs_dsp_mem_region_name(param->mem_type),
+ param->offs_words, param->len_bytes, param->flags);
+}
+
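+/*
+ * Each KUNIT_ARRAY_PARAM() below creates a <name>_gen_params() generator
+ * that feeds these entries to KUNIT_CASE_PARAM() in the test case lists.
+ */
+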
+/* All parameters populated, with various lengths */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_len_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 8 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 12 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 16 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 48 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 100 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 512 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 1000 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_len, all_pop_varying_len_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various offsets */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_offset_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 0, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 2, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 3, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 8, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 10, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 128, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 180, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_offset, all_pop_varying_offset_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various X and Y memory regions */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_xy_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_XM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_xy, all_pop_varying_xy_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, using ZM */
+static const struct cs_dsp_ctl_rw_test_param all_pop_z_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_ZM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_z, all_pop_z_cases, cs_dsp_ctl_all_param_desc);
+
+/* All parameters populated, with various algorithm ids */
+static const struct cs_dsp_ctl_rw_test_param all_pop_varying_alg_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xb, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0x9f1234, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+ { .alg_id = 0xff00ff, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4 },
+};
+KUNIT_ARRAY_PARAM(all_pop_varying_alg, all_pop_varying_alg_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readable_flags,
+ all_pop_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * read-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_readonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_readonly_flags,
+ all_pop_readonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile readable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_readable_flags,
+ all_pop_nonvol_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeable_flags,
+ all_pop_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * write-only control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_writeonly_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_writeonly_flags,
+ all_pop_writeonly_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * non-volatile writeable control
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_nonvol_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_nonvol_writeable_flags,
+ all_pop_nonvol_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile readable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_readable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* flags == 0 is volatile while firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_READABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_readable_flags,
+ all_pop_volatile_readable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
+/*
+ * All parameters populated, with all combinations of flags for a
+ * volatile writeable control.
+ */
+static const struct cs_dsp_ctl_rw_test_param all_pop_volatile_writeable_flags_cases[] = {
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = 0 /* flags == 0 is volatile while firmware is running */
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS | WMFW_CTL_FLAG_WRITEABLE,
+ },
+ { .alg_id = 0xfafa, .mem_type = WMFW_ADSP2_YM, .offs_words = 1, .len_bytes = 4,
+ .flags = WMFW_CTL_FLAG_VOLATILE | WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_READABLE | WMFW_CTL_FLAG_WRITEABLE,
+ },
+};
+KUNIT_ARRAY_PARAM(all_pop_volatile_writeable_flags,
+ all_pop_volatile_writeable_flags_cases,
+ cs_dsp_ctl_all_param_desc);
+
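+/*
+ * Each KUNIT_ARRAY_PARAM() above generates a <name>_gen_params function.
+ * The KUNIT_CASE_PARAM() entries in the tables below pair a test function
+ * with one of those generators, so each test is run once for every entry
+ * of the corresponding *_cases array.
+ */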
+static struct kunit_case cs_dsp_ctl_rw_test_cases_adsp[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_z_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_ctl_rw_test_cases_halo[] = {
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_varying_alg_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_running, all_pop_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_offset_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running, all_pop_varying_xy_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_running,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_started,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_stopped_powered_down,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_loaded_fw,
+ all_pop_volatile_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_volatile_not_current_running_fw,
+ all_pop_volatile_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_started,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_stopped_powered_down,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_loaded_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_volatile_not_current_running_fw,
+ all_pop_volatile_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_with_seek,
+ all_pop_nonvol_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_truncated,
+ all_pop_readable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_cache_truncated,
+ all_pop_nonvol_readable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_with_seek,
+ all_pop_nonvol_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_truncated,
+ all_pop_writeable_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_cache_truncated,
+ all_pop_nonvol_writeable_flags_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_oob,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_length_overflow,
+ all_pop_varying_len_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_with_seek_and_length_oob,
+ all_pop_varying_len_gen_params),
+
+ KUNIT_CASE_PARAM(cs_dsp_ctl_read_from_writeonly,
+ all_pop_writeonly_flags_gen_params),
+ KUNIT_CASE_PARAM(cs_dsp_ctl_write_to_readonly,
+ all_pop_readonly_flags_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_halo = {
+ .name = "cs_dsp_ctl_rw_wmfwV3_halo",
+ .init = cs_dsp_ctl_rw_test_halo_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_ctl_rw_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+static struct kunit_suite cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_ctl_rw_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_ctl_rw_test_cases_adsp,
+};
+
+kunit_test_suites(&cs_dsp_ctl_rw_test_halo,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_ctl_rw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
new file mode 100644
index 000000000000..9e997c4ee2d6
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw.c
@@ -0,0 +1,2211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Test method is:
+ *
+ * 1) Create a mock regmap in cache-only mode so that all writes will be cached.
+ * 2) Create a dummy wmfw file.
+ * 3) Call cs_dsp_power_up() with the wmfw file.
+ * 4) Readback the cached value of registers that should have been written and
+ * check they have the correct value.
+ * 5) All the registers that are expected to have been written are dropped from
+ * the cache. This should leave the cache clean.
+ * 6) If the cache is still dirty there have been unexpected writes.
+ */
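+
+/*
+ * Illustrative outline of the verification pattern shared by the tests in
+ * this file (the calls are those used below; addresses and lengths are
+ * hypothetical):
+ *
+ *   regmap_raw_read(priv->dsp->regmap, reg_addr, readback, len);
+ *   KUNIT_EXPECT_MEMEQ(test, readback, payload_data, len);
+ *   cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, len);
+ *   KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+ */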
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *)
+KUNIT_DEFINE_ACTION_WRAPPER(_vfree_wrapper, vfree, void *)
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *)
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_wmfw_test_param {
+ unsigned int num_blocks;
+ int mem_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+/*
+ * wmfw that writes the XM header.
+ * cs_dsp always reads this back from unpacked XM.
+ */
+static void wmfw_write_xm_header_unpacked(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ unsigned int reg_addr;
+ u8 *readback;
+
+ /* XM header payload was added to wmfw by test case init function */
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Read raw so endianness and register width don't matter */
+ readback = kunit_kzalloc(test, local->xm_header->blob_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_XM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ local->xm_header->blob_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write one payload of length param->num_blocks */
+static void wmfw_write_one_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type, mem_offset_dsp_words,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
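+
+/*
+ * Note on the payload-size rounding used above and in the tests below:
+ * the do/while always adds at least one more register block and keeps
+ * adding until the total payload size is a multiple of 4 bytes. Starting
+ * from a hypothetical 6-byte block with num_blocks == 1 that gives
+ * 6 -> 12 bytes; with a 4-byte block it stops after one extra block
+ * (8 bytes).
+ */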
+
+/* Write several smallest possible payloads for the given memory type */
+static void wmfw_write_multiple_oneblock_payloads(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each */
+ for (i = 0; i < num_payloads; ++i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write several smallest possible payloads of the given memory type
+ * in reverse address order
+ */
+static void wmfw_write_multiple_oneblock_payloads_reverse(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const unsigned int num_payloads = param->num_blocks;
+ int i;
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ payload_size_dsp_words = 0;
+ payload_size_bytes = 0;
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ get_random_bytes(payload_data, num_payloads * payload_size_bytes);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads of one block each */
+ for (i = num_payloads - 1; i >= 0; --i) {
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + (i * payload_size_dsp_words),
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ num_payloads * payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, num_payloads * payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, num_payloads * payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write multiple payloads of length param->num_blocks.
+ * The payloads are not in address order and collectively do not patch
+ * a contiguous block of memory.
+ */
+static void wmfw_write_multiple_payloads_sparse_unordered(struct kunit *test)
+{
+ static const unsigned int random_offsets[] = {
+ 11, 69, 59, 61, 32, 75, 4, 38, 70, 13, 79, 47, 46, 53, 18, 44,
+ 54, 35, 51, 21, 26, 45, 27, 41, 66, 2, 17, 56, 40, 9, 8, 20,
+ 29, 19, 63, 42, 12, 16, 43, 3, 5, 55, 52, 22
+ };
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int mem_offset_dsp_words = 0;
+ unsigned int payload_size_bytes, payload_size_dsp_words;
+ const int num_payloads = ARRAY_SIZE(random_offsets);
+ int i;
+
+ payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ payload_size_dsp_words = param->num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv, param->mem_type);
+
+ /* payloads must be a multiple of 4 bytes and a whole number of DSP registers */
+ do {
+ payload_size_dsp_words += cs_dsp_mock_reg_block_length_dsp_words(priv,
+ param->mem_type);
+ payload_size_bytes += cs_dsp_mock_reg_block_length_bytes(priv, param->mem_type);
+ } while (payload_size_bytes % 4);
+
+ payload_data = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kcalloc(test, num_payloads, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (param->mem_type == WMFW_ADSP2_XM)
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / payload_size_bytes;
+
+ /* Add multiple payloads at "random" locations */
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset = random_offsets[i] * payload_size_dsp_words;
+
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ param->mem_type,
+ mem_offset_dsp_words + offset,
+ &payload_data[i * payload_size_bytes],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < num_payloads; ++i) {
+ unsigned int offset_num_regs = (random_offsets[i] * payload_size_bytes) /
+ regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, param->mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * mem_offset_dsp_words;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr,
+ &readback[i * payload_size_bytes],
+ payload_size_bytes),
+ 0);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
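+
+/*
+ * Worked example of the readback address arithmetic in the test above,
+ * using hypothetical values: with payload_size_bytes == 8, 4-byte register
+ * values and a register stride of 4, random_offsets[i] == 11 gives
+ * offset_num_regs = (11 * 8) / 4 = 22, i.e. a readback address of
+ * base + 22 * 4 = base + 88 (plus any XM header offset).
+ */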
+
+/* Write the whole of PM in a single unpacked payload */
+static void wmfw_write_all_unpacked_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_ADSP2_PM);
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_PM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_PM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Write the whole of PM in a single packed payload */
+static void wmfw_write_all_packed_pm(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ unsigned int payload_size_bytes;
+
+ payload_size_bytes = cs_dsp_mock_size_of_region(priv->dsp, WMFW_HALO_PM_PACKED);
+ payload_data = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, payload_data);
+
+ readback = vmalloc(payload_size_bytes);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+ kunit_add_action_or_reset(priv->test, _vfree_wrapper, readback);
+ memset(readback, 0, payload_size_bytes);
+
+ /* Add a single PM payload */
+ get_random_bytes(payload_data, payload_size_bytes);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_HALO_PM_PACKED, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_HALO_PM_PACKED);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
+ */
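+/*
+ * For example, on an ADSP2 with a 16-bit register map one unpacked DSP
+ * word spans two registers, so a minimum-sized block is 2 registers
+ * (4 bytes); with a 32-bit register map it is a single register per DSP
+ * word. (Illustrative only; the cs_dsp_mock_reg_block_length_*() helpers
+ * report the real values for the core under test.)
+ */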
+static void wmfw_write_multiple_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_ADSP2_PM, 11, 60 },
+ { WMFW_ADSP2_ZM, 69, 8 },
+ { WMFW_ADSP2_YM, 32, 74 },
+ { WMFW_ADSP2_XM, 70, 38 },
+ { WMFW_ADSP2_PM, 84, 48 },
+ { WMFW_ADSP2_XM, 46, 18 },
+ { WMFW_ADSP2_PM, 0, 8 },
+ { WMFW_ADSP2_YM, 0, 30 },
+ { WMFW_ADSP2_PM, 160, 50 },
+ { WMFW_ADSP2_ZM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks, payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write a series of payloads to various packed and unpacked memory regions.
+ * The payloads are of various lengths and offsets, driven by the
+ * payload_defs table. The offset and length are both given as a
+ * number of minimum-sized register blocks to keep the maths simpler.
+ * (Where a minimum-sized register block is the smallest number of
+ * registers that contain a whole number of DSP words.)
+ */
+static void wmfw_write_multiple_packed_unpacked_mem(struct kunit *test)
+{
+ static const struct {
+ int mem_type;
+ unsigned int offset_num_blocks;
+ unsigned int num_blocks;
+ } payload_defs[] = {
+ { WMFW_HALO_PM_PACKED, 11, 60 },
+ { WMFW_ADSP2_YM, 69, 8 },
+ { WMFW_HALO_YM_PACKED, 32, 74 },
+ { WMFW_HALO_XM_PACKED, 70, 38 },
+ { WMFW_HALO_PM_PACKED, 84, 48 },
+ { WMFW_HALO_XM_PACKED, 46, 18 },
+ { WMFW_HALO_PM_PACKED, 0, 8 },
+ { WMFW_HALO_YM_PACKED, 0, 30 },
+ { WMFW_HALO_PM_PACKED, 160, 50 },
+ { WMFW_ADSP2_XM, 21, 26 },
+ };
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int payload_size_bytes, offset_num_dsp_words;
+ unsigned int reg_addr, offset_bytes, offset_num_regs;
+ void **payload_data;
+ void *readback;
+ int i, ret;
+
+ payload_data = kunit_kcalloc(test, ARRAY_SIZE(payload_defs), sizeof(*payload_data),
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ payload_data[i] = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data[i]);
+ get_random_bytes(payload_data[i], payload_size_bytes);
+
+ offset_num_dsp_words = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_dsp_words(priv,
+ payload_defs[i].mem_type);
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ payload_defs[i].mem_type,
+ offset_num_dsp_words,
+ payload_data[i],
+ payload_size_bytes);
+ }
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ for (i = 0; i < ARRAY_SIZE(payload_defs); ++i) {
+ payload_size_bytes = payload_defs[i].num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv,
+ payload_defs[i].mem_type);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ offset_bytes = payload_defs[i].offset_num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, payload_defs[i].mem_type);
+ offset_num_regs = offset_bytes / regmap_get_val_bytes(priv->dsp->regmap);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, payload_defs[i].mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ ret = regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes);
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+ KUNIT_EXPECT_MEMEQ_MSG(test, readback, payload_data[i], payload_size_bytes,
+ "%s @%u num:%u\n",
+ cs_dsp_mem_region_name(payload_defs[i].mem_type),
+ payload_defs[i].offset_num_blocks,
+ payload_defs[i].num_blocks);
+
+ kunit_kfree(test, readback);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, payload_size_bytes);
+ }
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * using one packed payload followed by one unpacked word.
+ */
+static void wmfw_write_packed_1_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of one unpacked word to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache. The unpacked payload is offset within
+ * unpacked register space by the number of DSP words that were
+ * written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
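+
+/*
+ * Worked example of the trailing-word offset above, with hypothetical
+ * numbers on a Halo core (one DSP word per unpacked register): with
+ * mem_offset_dsp_words == 24 and a packed payload of 12 DSP words the
+ * trailing unpacked word is expected at unpacked word 36, i.e.
+ * offset_num_regs == 36 from the base of the unpacked region.
+ */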
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by one payload of two unpacked words.
+ */
+static void wmfw_write_packed_2_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of two unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by one payload of three unpacked words.
+ */
+static void wmfw_write_packed_3_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add payload of three unpacked words to DSP memory right after
+ * the packed payload words.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked payload is offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * using one packed payload followed by two payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_2_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add two unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * using one packed payload followed by three payloads of one unpacked word each.
+ */
+static void wmfw_write_packed_3_single_unpacked_trailing(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int mem_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes, packed_payload_size_dsp_words;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+ packed_payload_size_dsp_words = param->num_blocks * dsp_words_per_packed_block;
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM) {
+ mem_offset_dsp_words += local->xm_header->blob_size_bytes / sizeof(u32);
+
+ /* Round up to multiple of packed block length */
+ mem_offset_dsp_words = roundup(mem_offset_dsp_words, dsp_words_per_packed_block);
+ }
+
+ /* Add a single packed payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type, mem_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+ /*
+ * Add three unpacked words to DSP memory right after the packed
+ * payload words. Each unpacked word in its own payload.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ mem_offset_dsp_words + packed_payload_size_dsp_words + 2,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache. The unpacked words are offset
+ * within unpacked register space by the number of DSP words
+ * that were written in the packed payload.
+ */
+ offset_num_regs = (mem_offset_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ offset_num_regs += (packed_payload_size_dsp_words / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is one word longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one unpacked word
+ * followed by a packed payload.
+ */
+static void wmfw_write_packed_1_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[1];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for an unaligned word before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 1;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /* Add a single unpacked word right before the first word of packed data */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked word. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked word was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 1) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of two unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_2_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use one payload of three unpacked
+ * words followed by a packed payload.
+ */
+static void wmfw_write_packed_3_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as a single payload right before the
+ * first word of packed data
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is two words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use two payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_2_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[2];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for two unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 2;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add two unpacked words as two payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 2) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/*
+ * Write XM/YM data that is three words longer than a packed block multiple,
+ * and does not start on a packed alignment. Use three payloads of one unpacked
+ * word each, followed by a packed payload.
+ */
+static void wmfw_write_packed_3_single_unpacked_leading(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ int packed_mem_type = param->mem_type;
+ int unpacked_mem_type = cs_dsp_mock_packed_to_unpacked_mem_type(param->mem_type);
+ unsigned int dsp_words_per_packed_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, packed_mem_type);
+ unsigned int dsp_words_per_unpacked_block =
+ cs_dsp_mock_reg_block_length_dsp_words(priv, unpacked_mem_type);
+ unsigned int packed_payload_offset_dsp_words = 0;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ void *packed_payload_data, *readback;
+ u32 unpacked_payload_data[3];
+ unsigned int packed_payload_size_bytes;
+ unsigned int offset_num_regs;
+
+ packed_payload_size_bytes = param->num_blocks *
+ cs_dsp_mock_reg_block_length_bytes(priv, packed_mem_type);
+
+ packed_payload_data = kunit_kmalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, packed_payload_data);
+ get_random_bytes(packed_payload_data, packed_payload_size_bytes);
+
+ get_random_bytes(unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ readback = kunit_kzalloc(test, packed_payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Tests on XM must be after the XM header */
+ if (unpacked_mem_type == WMFW_ADSP2_XM)
+ packed_payload_offset_dsp_words += local->xm_header->blob_size_bytes /
+ sizeof(u32);
+ /*
+ * Leave space for three unaligned words before the packed block and
+ * round the packed block start to multiple of packed block length.
+ */
+ packed_payload_offset_dsp_words += 3;
+ packed_payload_offset_dsp_words = roundup(packed_payload_offset_dsp_words,
+ dsp_words_per_packed_block);
+
+ /*
+ * Add three unpacked words as three payloads each containing a single
+ * unpacked word.
+ */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 3,
+ &unpacked_payload_data[0],
+ sizeof(unpacked_payload_data[0]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 2,
+ &unpacked_payload_data[1],
+ sizeof(unpacked_payload_data[1]));
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ unpacked_mem_type,
+ packed_payload_offset_dsp_words - 1,
+ &unpacked_payload_data[2],
+ sizeof(unpacked_payload_data[2]));
+
+ /* Add payload of packed data to the DSP memory after the unpacked words. */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ packed_mem_type,
+ packed_payload_offset_dsp_words,
+ packed_payload_data, packed_payload_size_bytes);
+
+ /* Download the wmfw */
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ /*
+ * Check that the packed payload was written correctly and drop
+ * it from the regmap cache.
+ */
+ offset_num_regs = (packed_payload_offset_dsp_words / dsp_words_per_packed_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, packed_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, packed_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ packed_payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, packed_payload_data, packed_payload_size_bytes);
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, packed_payload_size_bytes);
+
+ /*
+ * Check that the unpacked words were written correctly and drop
+ * them from the regmap cache.
+ */
+ offset_num_regs = ((packed_payload_offset_dsp_words - 3) / dsp_words_per_unpacked_block) *
+ cs_dsp_mock_reg_block_length_registers(priv, unpacked_mem_type);
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, unpacked_mem_type);
+ reg_addr += offset_num_regs * regmap_get_reg_stride(priv->dsp->regmap);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback,
+ sizeof(unpacked_payload_data)),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, unpacked_payload_data, sizeof(unpacked_payload_data));
+
+ cs_dsp_mock_regmap_drop_bytes(priv, reg_addr, sizeof(unpacked_payload_data));
+
+ /* Drop expected writes and the cache should then be clean */
+ cs_dsp_mock_xm_header_drop_from_regmap_cache(priv);
+ KUNIT_EXPECT_FALSE(test, cs_dsp_mock_regmap_is_dirty(priv, true));
+}
+
+/* Load a wmfw containing multiple info blocks */
+static void wmfw_load_with_info(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ char *infobuf;
+ const unsigned int payload_size_bytes = 48;
+ int ret;
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add a couple of info blocks at the start of the wmfw */
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is a timestamp");
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "This is some more info");
+
+ /* Add a single payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ /* Add a bigger info block, then another small one */
+ infobuf = kunit_kzalloc(test, 512, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, infobuf);
+
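+ /* Fill the 512-byte buffer by repeating the marker string until no more fits */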
+ for (; strlcat(infobuf, "Waffle{Blah}\n", 512) < 512;)
+ ;
+
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, infobuf);
+ cs_dsp_mock_wmfw_add_info(local->wmfw_builder, "Another block of info");
+
+ /* Add another payload */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 64,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(priv->local->wmfw_builder);
+
+ ret = cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc");
+ KUNIT_EXPECT_EQ_MSG(test, ret, 0, "cs_dsp_power_up failed: %d\n", ret);
+
+ /* Check first payload was written */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+
+ /* Check second payload was written */
+ reg_addr += cs_dsp_mock_reg_addr_inc_per_unpacked_word(priv) * 64;
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+static int cs_dsp_wmfw_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
+ priv->local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
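+ /* Hold a reference on the dummy device; released by the put_device action below */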
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least one algorithm, so create
+ * a dummy one that tests can use and extract it to a data payload.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, priv->local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_mem_param_desc(const struct cs_dsp_wmfw_test_param *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s num_blocks:%u",
+ cs_dsp_mem_region_name(param->mem_type),
+ param->num_blocks);
+}
+
+static const struct cs_dsp_wmfw_test_param adsp2_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_PM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_ZM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(adsp2_all_num_blocks,
+ adsp2_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param halo_all_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_PM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_XM, .num_blocks = 16 },
+
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 1 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 2 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 3 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 4 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 5 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 6 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 12 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 13 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 14 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 15 },
+ { .mem_type = WMFW_ADSP2_YM, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(halo_all_num_blocks,
+ halo_all_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static const struct cs_dsp_wmfw_test_param packed_xy_num_blocks_param_cases[] = {
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_XM_PACKED, .num_blocks = 16 },
+
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 1 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 2 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 3 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 4 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 5 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 6 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 12 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 13 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 14 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 15 },
+ { .mem_type = WMFW_HALO_YM_PACKED, .num_blocks = 16 },
+};
+
+KUNIT_ARRAY_PARAM(packed_xy_num_blocks,
+ packed_xy_num_blocks_param_cases,
+ cs_dsp_mem_param_desc);
+
+static struct kunit_case cs_dsp_wmfw_test_cases_halo[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ halo_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ halo_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_packed_pm),
+ KUNIT_CASE(wmfw_write_multiple_packed_unpacked_mem),
+
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_trailing,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_1_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_2_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_packed_3_single_unpacked_leading,
+ packed_xy_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_test_cases_adsp2[] = {
+ KUNIT_CASE(wmfw_write_xm_header_unpacked),
+ KUNIT_CASE_PARAM(wmfw_write_one_payload,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_oneblock_payloads_reverse,
+ adsp2_all_num_blocks_gen_params),
+ KUNIT_CASE_PARAM(wmfw_write_multiple_payloads_sparse_unordered,
+ adsp2_all_num_blocks_gen_params),
+
+ KUNIT_CASE(wmfw_write_all_unpacked_pm),
+ KUNIT_CASE(wmfw_write_multiple_unpacked_mem),
+
+ KUNIT_CASE(wmfw_load_with_info),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_halo = {
+ .name = "cs_dsp_wmfwV3_halo",
+ .init = cs_dsp_wmfw_test_halo_init,
+ .test_cases = cs_dsp_wmfw_test_cases_halo,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_32bit",
+ .init = cs_dsp_wmfw_test_adsp2_32bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw0_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw1_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_adsp2_16bit",
+ .init = cs_dsp_wmfw_test_adsp2_16bit_wmfw2_init,
+ .test_cases = cs_dsp_wmfw_test_cases_adsp2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_test_halo,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
new file mode 100644
index 000000000000..c309843261d7
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_wmfw_error.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// KUnit tests for cs_dsp.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+//
+
+#include <kunit/device.h>
+#include <kunit/resource.h>
+#include <kunit/test.h>
+#include <linux/build_bug.h>
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/cs_dsp_test_utils.h>
+#include <linux/firmware/cirrus/wmfw.h>
+#include <linux/random.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+KUNIT_DEFINE_ACTION_WRAPPER(_put_device_wrapper, put_device, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(_cs_dsp_remove_wrapper, cs_dsp_remove, struct cs_dsp *);
+
+struct cs_dsp_test_local {
+ struct cs_dsp_mock_xm_header *xm_header;
+ struct cs_dsp_mock_wmfw_builder *wmfw_builder;
+ int wmfw_version;
+};
+
+struct cs_dsp_wmfw_test_param {
+ int block_type;
+};
+
+static const struct cs_dsp_mock_alg_def cs_dsp_wmfw_err_test_mock_algs[] = {
+ {
+ .id = 0xfafa,
+ .ver = 0x100000,
+ .xm_size_words = 164,
+ .ym_size_words = 164,
+ .zm_size_words = 164,
+ },
+};
+
+static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
+ .shortname = "Dummy Coeff",
+ .type = WMFW_CTL_TYPE_BYTES,
+ .mem_type = WMFW_ADSP2_YM,
+ .flags = WMFW_CTL_FLAG_VOLATILE,
+ .length_bytes = 4,
+};
+
+/* Load a wmfw containing unknown blocks. They should be skipped. */
+static void wmfw_load_with_unknown_blocks(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int reg_addr;
+ u8 *payload_data, *readback;
+ u8 random_data[8];
+ const unsigned int payload_size_bytes = 64;
+
+ /* Add dummy XM header payload to wmfw */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_XM, 0,
+ local->xm_header->blob_data,
+ local->xm_header->blob_size_bytes);
+
+ payload_data = kunit_kmalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, payload_data);
+ get_random_bytes(payload_data, payload_size_bytes);
+
+ readback = kunit_kzalloc(test, payload_size_bytes, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, readback);
+
+ /* Add some unknown blocks at the start of the wmfw */
+ get_random_bytes(random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xf5, 0,
+ random_data, sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0xc0, 0, random_data,
+ sizeof(random_data));
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, 0x33, 0, NULL, 0);
+
+ /* Add a single payload to be written to DSP memory */
+ cs_dsp_mock_wmfw_add_data_block(local->wmfw_builder,
+ WMFW_ADSP2_YM, 0,
+ payload_data, payload_size_bytes);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ /* Check that the payload was written to memory */
+ reg_addr = cs_dsp_mock_base_addr_for_mem(priv, WMFW_ADSP2_YM);
+ KUNIT_EXPECT_EQ(test,
+ regmap_raw_read(priv->dsp->regmap, reg_addr, readback, payload_size_bytes),
+ 0);
+ KUNIT_EXPECT_MEMEQ(test, readback, payload_data, payload_size_bytes);
+}
+
+/* Load a wmfw that doesn't have a valid magic marker. */
+static void wmfw_err_wrong_magic(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
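+ /* "WMDR" is the magic of a coefficient (bin) file, so not a valid wmfw */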
+ memcpy((void *)wmfw->data, "WMDR", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "xMFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WxFW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMxW", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memcpy((void *)wmfw->data, "WMFx", 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ memset((void *)wmfw->data, 0, 4);
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* Load a wmfw that is too short for a valid header. */
+static void wmfw_err_too_short_for_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ do {
+ wmfw->size--;
+
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ } while (wmfw->size > 0);
+}
+
+/* Header length field isn't a valid header length. */
+static void wmfw_err_bad_header_length(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ unsigned int real_len, len;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ real_len = le32_to_cpu(header->len);
+
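+ /* Any length shorter than the real header length must be rejected */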
+ for (len = 0; len < real_len; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ for (len = real_len + 1; len < real_len + 7; len++) {
+ header->len = cpu_to_le32(len);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+
+ header->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ header->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* Wrong core type in header. */
+static void wmfw_err_bad_core_type(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+
+ header->core = 0;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = priv->dsp->type + 1;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+
+ header->core = 0xff;
+ KUNIT_EXPECT_LT(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+}
+
+/* File too short to contain a full block header */
+static void wmfw_too_short_for_block_header(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+ u32 dummy_payload = 0;
+
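+ /* Note the size of the wmfw before any block is added */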
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ /* Add the block. A block must have at least 4 bytes of payload */
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &dummy_payload, sizeof(dummy_payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* File too short to contain the block payload */
+static void wmfw_too_short_for_block_payload(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ static const u8 payload[256] = { };
+ int i;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (i = 0; i < sizeof(payload); i++) {
+ wmfw->size--;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* Block payload length is a garbage value */
+static void wmfw_block_payload_len_garbage(struct kunit *test)
+{
+ const struct cs_dsp_wmfw_test_param *param = test->param_value;
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ u32 payload = 0;
+
+ cs_dsp_mock_wmfw_add_raw_block(local->wmfw_builder, param->block_type, 0,
+ &payload, sizeof(payload));
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+
+ /* Sanity check that we're looking at the correct part of the wmfw */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->offset) >> 24, param->block_type);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(region->len), sizeof(payload));
+
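+ /* Try payload lengths that run off the end of the file */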
+ region->len = cpu_to_le32(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ region->len = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* File too short to contain an algorithm header */
+static void wmfw_too_short_for_alg_header(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ unsigned int header_length;
+
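+ /* Note the size of the wmfw before the alg info block is added */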
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ header_length = wmfw->size;
+ kunit_kfree(test, wmfw);
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ NULL, NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+ KUNIT_ASSERT_GT(test, wmfw->size, header_length);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ for (wmfw->size--; wmfw->size > header_length; wmfw->size--) {
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+ }
+}
+
+/* V1 algorithm name does not have NUL terminator */
+static void wmfw_v1_alg_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Write a string to the alg name that overflows the array */
+ memset(alg_data->descr, 0, sizeof(alg_data->descr));
+ memset(alg_data->name, 'A', sizeof(alg_data->name));
+ memset(alg_data->descr, 'A', sizeof(alg_data->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen would overflow alg_data->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that a strnlen() returns
+ * sizeof(alg_data->name[]), therefore it doesn't have a NUL.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(alg_data->name, sizeof(alg_data->name)),
+ sizeof(alg_data->name));
+
+ /*
+ * The alg name isn't stored, but cs_dsp parses the name field.
+ * It should load the file successfully and create the control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp's string handling walks past the end of the alg name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ algorithm name exceeds length of containing block */
+static void wmfw_v2_alg_name_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", NULL);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[1]), 3 | ('a' << 8) | ('b' << 16) | ('c' << 24));
+ KUNIT_ASSERT_EQ(test, *(u8 *)&alg_data[1], 3);
+
+ /* Set name string length longer than available space */
+ *(u8 *)&alg_data[1] = 4;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 7;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0x80;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(u8 *)&alg_data[1] = 0xff;
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ algorithm description exceeds length of containing block */
+static void wmfw_v2_alg_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data;
+
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /*
+ * Sanity check we're pointing at the alg header of
+ * [ alg_id ][name_len]abc[desc_len]de
+ */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[2]), 2 | ('d' << 16) | ('e' << 24));
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(*(__le16 *)&alg_data[2]), 2);
+
+ /* Set description string length longer than available space */
+ *(__le16 *)&alg_data[2] = cpu_to_le16(4);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(7);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x80);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0x8000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *(__le16 *)&alg_data[2] = cpu_to_le16(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coefficient count exceeds length of containing block */
+static void wmfw_v1_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Add one to the coefficient count */
+ alg_data->ncoeff = cpu_to_le32(le32_to_cpu(alg_data->ncoeff) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ alg_data->ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ alg_data->ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient count exceeds length of containing block */
+static void wmfw_v2_coeff_count_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *ncoeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
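+ /*
+ * In this mock wmfw, word 0 is the alg id, word 1 packs the name
+ * length with "abc" and word 2 packs the description length with
+ * "de", so the coefficient count sits in word 3.
+ */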
+ ncoeff = (__force __le32 *)&alg_data[3];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(*ncoeff), 1);
+
+ /* Add one to the coefficient count */
+ *ncoeff = cpu_to_le32(2);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the coefficient count garbage */
+ *ncoeff = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ *ncoeff = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient block size exceeds length of containing block */
+static void wmfw_v2_coeff_block_size_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
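+ /*
+ * In this mock wmfw the coefficient descriptor starts at word 4,
+ * after the alg id, packed name, packed description and coefficient
+ * count words; its first word carries the memory type in the upper
+ * 16 bits, which is what the assertion below checks.
+ */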
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the block size */
+ coeff[1] = cpu_to_le32(le32_to_cpu(coeff[1]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Make the block size garbage */
+ coeff[1] = cpu_to_le32(0xffffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x7fffffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ coeff[1] = cpu_to_le32(0x80000000);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V1 coeff name does not have NUL terminator */
+static void wmfw_v1_coeff_name_unterminated(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ struct wmfw_adsp_alg_data *alg_data;
+ struct wmfw_adsp_coeff_data *coeff;
+ struct cs_dsp_coeff_ctl *ctl;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (struct wmfw_adsp_alg_data *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->id), cs_dsp_wmfw_err_test_mock_algs[0].id);
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data->ncoeff), 1);
+
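+ /*
+ * The V1 coefficient descriptors follow the fixed-size algorithm
+ * descriptor, so the first one starts at alg_data->data.
+ */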
+ coeff = (void *)alg_data->data;
+
+ /* Write a string to the coeff name that overflows the array */
+ memset(coeff->descr, 0, sizeof(coeff->descr));
+ memset(coeff->name, 'A', sizeof(coeff->name));
+ memset(coeff->descr, 'A', sizeof(coeff->descr) - 1);
+
+ /*
+ * Sanity-check that a strlen() would overflow coeff->name.
+ * FORTIFY_STRING obstructs testing what strlen() would actually
+ * return, so instead verify that strnlen() returns
+ * sizeof(coeff->name), i.e. the name has no NUL terminator.
+ */
+ KUNIT_ASSERT_EQ(test, strnlen(coeff->name, sizeof(coeff->name)),
+ sizeof(coeff->name));
+
+ /*
+ * V1 controls do not have names, but cs_dsp parses the name
+ * field. It should load the file successfully and create the
+ * control.
+ * If FORTIFY_STRING is enabled it will detect a buffer overflow
+ * if cs_dsp's string handling walks past the end of the coeff name array.
+ */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ 0);
+ ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+ KUNIT_ASSERT_NOT_NULL(test, ctl);
+ KUNIT_EXPECT_EQ(test, ctl->subname_len, 0);
+}
+
+/* V2+ coefficient shortname exceeds length of coeff block */
+static void wmfw_v2_coeff_shortname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Add one to the shortname length */
+ coeff[2] = cpu_to_le32(le32_to_cpu(coeff[2]) + 1);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum shortname length */
+ coeff[2] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient fullname exceeds length of coeff block */
+static void wmfw_v2_coeff_fullname_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname;
+ size_t shortlen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Fullname follows the shortname rounded up to a __le32 boundary */
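+ /*
+ * The low byte of coeff[2] holds the shortname length; rounding it
+ * up to a multiple of sizeof(__le32) gives the byte offset from
+ * coeff[2] to the fullname length field.
+ */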
+ shortlen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32));
+ fullname = &coeff[2] + (shortlen / sizeof(*coeff));
+
+ /* Fullname increases in blocks of __le32 so increase past the current __le32 */
+ fullname[0] = cpu_to_le32(round_up(le32_to_cpu(fullname[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum fullname length */
+ fullname[0] = cpu_to_le32(255);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+/* V2+ coefficient description exceeds length of coeff block */
+static void wmfw_v2_coeff_description_exceeds_block(struct kunit *test)
+{
+ struct cs_dsp_test *priv = test->priv;
+ struct cs_dsp_test_local *local = priv->local;
+ struct firmware *wmfw;
+ struct wmfw_header *header;
+ struct wmfw_region *region;
+ __le32 *alg_data, *coeff, *fullname, *description;
+ size_t namelen;
+
+ /* Create alg info block with a coefficient */
+ cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
+ cs_dsp_wmfw_err_test_mock_algs[0].id,
+ "abc", "de");
+ cs_dsp_mock_wmfw_add_coeff_desc(local->wmfw_builder, &mock_coeff_template);
+ cs_dsp_mock_wmfw_end_alg_info_block(local->wmfw_builder);
+
+ wmfw = cs_dsp_mock_wmfw_get_firmware(local->wmfw_builder);
+
+ /* Sanity-check that the good wmfw loads ok */
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "wmfw", NULL, NULL, "misc"),
+ 0);
+ cs_dsp_power_down(priv->dsp);
+
+ header = (struct wmfw_header *)wmfw->data;
+ region = (struct wmfw_region *)&wmfw->data[le32_to_cpu(header->len)];
+ alg_data = (__force __le32 *)region->data;
+
+ /* Sanity check we're pointing at the alg header */
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(alg_data[0]), cs_dsp_wmfw_err_test_mock_algs[0].id);
+
+ /* Sanity check we're pointing at the coeff block */
+ coeff = (__force __le32 *)&alg_data[4];
+ KUNIT_ASSERT_EQ(test, le32_to_cpu(coeff[0]), mock_coeff_template.mem_type << 16);
+
+ /* Description follows the shortname and fullname rounded up to __le32 boundaries */
+ namelen = round_up(le32_to_cpu(coeff[2]) & 0xff, sizeof(__le32));
+ fullname = &coeff[2] + (namelen / sizeof(*coeff));
+ namelen = round_up(le32_to_cpu(fullname[0]) & 0xff, sizeof(__le32));
+ description = fullname + (namelen / sizeof(*fullname));
+
+ /* Description increases in blocks of __le32 so increase past the current __le32 */
+ description[0] = cpu_to_le32(round_up(le32_to_cpu(description[0]) + 1, sizeof(__le32)));
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+
+ /* Maximum description length */
+ description[0] = cpu_to_le32(0xffff);
+ KUNIT_EXPECT_EQ(test,
+ cs_dsp_power_up(priv->dsp, wmfw, "mock_wmfw", NULL, NULL, "misc"),
+ -EOVERFLOW);
+}
+
+static void cs_dsp_wmfw_err_test_exit(struct kunit *test)
+{
+ /*
+ * Testing error conditions can produce a lot of log output
+ * from cs_dsp error messages, so rate limit the test cases.
+ */
+ usleep_range(200, 500);
+}
+
+static int cs_dsp_wmfw_err_test_common_init(struct kunit *test, struct cs_dsp *dsp,
+ int wmfw_version)
+{
+ struct cs_dsp_test *priv;
+ struct cs_dsp_test_local *local;
+ struct device *test_dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ local = kunit_kzalloc(test, sizeof(struct cs_dsp_test_local), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ priv->test = test;
+ priv->dsp = dsp;
+ test->priv = priv;
+ priv->local = local;
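+ /*
+ * wmfw_version selects the wmfw file format version that the mock
+ * builder below will generate; each suite passes the version it is
+ * meant to exercise (0, 1 or 2 for ADSP2, 3 for Halo).
+ */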
+ local->wmfw_version = wmfw_version;
+
+ /* Create dummy struct device */
+ test_dev = kunit_device_register(test, "cs_dsp_test_drv");
+ if (IS_ERR(test_dev))
+ return PTR_ERR(test_dev);
+
+ dsp->dev = get_device(test_dev);
+ if (!dsp->dev)
+ return -ENODEV;
+
+ ret = kunit_add_action_or_reset(test, _put_device_wrapper, dsp->dev);
+ if (ret)
+ return ret;
+
+ dev_set_drvdata(dsp->dev, priv);
+
+ /* Allocate regmap */
+ ret = cs_dsp_mock_regmap_init(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * There must always be an XM header with at least 1 algorithm,
+ * so create a dummy one and pre-populate XM so the wmfw doesn't
+ * have to contain an XM blob.
+ */
+ local->xm_header = cs_dsp_create_mock_xm_header(priv,
+ cs_dsp_wmfw_err_test_mock_algs,
+ ARRAY_SIZE(cs_dsp_wmfw_err_test_mock_algs));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->xm_header);
+ cs_dsp_mock_xm_header_write_to_regmap(local->xm_header);
+
+ local->wmfw_builder = cs_dsp_mock_wmfw_init(priv, local->wmfw_version);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, local->wmfw_builder);
+
+ /* Init cs_dsp */
+ dsp->client_ops = kunit_kzalloc(test, sizeof(*dsp->client_ops), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dsp->client_ops);
+
+ switch (dsp->type) {
+ case WMFW_ADSP2:
+ ret = cs_dsp_adsp2_init(dsp);
+ break;
+ case WMFW_HALO:
+ ret = cs_dsp_halo_init(dsp);
+ break;
+ default:
+ KUNIT_FAIL(test, "Untested DSP type %d\n", dsp->type);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Automatically call cs_dsp_remove() when test case ends */
+ return kunit_add_action_or_reset(priv->test, _cs_dsp_remove_wrapper, dsp);
+}
+
+static int cs_dsp_wmfw_err_test_halo_init(struct kunit *test)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_HALO;
+ dsp->mem = cs_dsp_mock_halo_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_halo_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_halo_core_base;
+ dsp->base_sysinfo = cs_dsp_mock_halo_sysinfo_base;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, 3);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 1;
+ dsp->mem = cs_dsp_mock_adsp2_32bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_32bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_32bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_32bit_init(test, 2);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_init(struct kunit *test, int wmfw_ver)
+{
+ struct cs_dsp *dsp;
+
+ /* Fill in cs_dsp and initialize */
+ dsp = kunit_kzalloc(test, sizeof(*dsp), GFP_KERNEL);
+ if (!dsp)
+ return -ENOMEM;
+
+ dsp->num = 1;
+ dsp->type = WMFW_ADSP2;
+ dsp->rev = 0;
+ dsp->mem = cs_dsp_mock_adsp2_16bit_dsp1_regions;
+ dsp->num_mems = cs_dsp_mock_count_regions(cs_dsp_mock_adsp2_16bit_dsp1_region_sizes);
+ dsp->base = cs_dsp_mock_adsp2_16bit_sysbase;
+
+ return cs_dsp_wmfw_err_test_common_init(test, dsp, wmfw_ver);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 0);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 1);
+}
+
+static int cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init(struct kunit *test)
+{
+ return cs_dsp_wmfw_err_test_adsp2_16bit_init(test, 2);
+}
+
+static void cs_dsp_wmfw_err_block_types_desc(const struct cs_dsp_wmfw_test_param *param,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_type:%#x", param->block_type);
+}
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_adsp2_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_ADSP2_PM },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_adsp2,
+ wmfw_valid_block_types_adsp2_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static const struct cs_dsp_wmfw_test_param wmfw_valid_block_types_halo_cases[] = {
+ { .block_type = WMFW_INFO_TEXT },
+ { .block_type = WMFW_HALO_PM_PACKED },
+ { .block_type = WMFW_ADSP2_YM },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_valid_block_types_halo,
+ wmfw_valid_block_types_halo_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static const struct cs_dsp_wmfw_test_param wmfw_invalid_block_types_cases[] = {
+ { .block_type = 0x33 },
+ { .block_type = 0xf5 },
+ { .block_type = 0xc0 },
+};
+
+KUNIT_ARRAY_PARAM(wmfw_invalid_block_types,
+ wmfw_invalid_block_types_cases,
+ cs_dsp_wmfw_err_block_types_desc);
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v0[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v1[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v1_alg_name_unterminated),
+ KUNIT_CASE(wmfw_v1_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v1_coeff_name_unterminated),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v2[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_adsp2_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_case cs_dsp_wmfw_err_test_cases_v3[] = {
+ KUNIT_CASE(wmfw_load_with_unknown_blocks),
+ KUNIT_CASE(wmfw_err_wrong_magic),
+ KUNIT_CASE(wmfw_err_too_short_for_header),
+ KUNIT_CASE(wmfw_err_bad_header_length),
+ KUNIT_CASE(wmfw_err_bad_core_type),
+
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_header, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_too_short_for_block_payload, wmfw_invalid_block_types_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_valid_block_types_halo_gen_params),
+ KUNIT_CASE_PARAM(wmfw_block_payload_len_garbage, wmfw_invalid_block_types_gen_params),
+
+ KUNIT_CASE(wmfw_too_short_for_alg_header),
+ KUNIT_CASE(wmfw_v2_alg_name_exceeds_block),
+ KUNIT_CASE(wmfw_v2_alg_description_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_count_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_block_size_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_shortname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_fullname_exceeds_block),
+ KUNIT_CASE(wmfw_v2_coeff_description_exceeds_block),
+
+ { } /* terminator */
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_halo = {
+ .name = "cs_dsp_wmfwV3_err_halo",
+ .init = cs_dsp_wmfw_err_test_halo_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v3,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_32bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0 = {
+ .name = "cs_dsp_wmfwV0_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v0,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1 = {
+ .name = "cs_dsp_wmfwV1_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v1,
+};
+
+static struct kunit_suite cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2 = {
+ .name = "cs_dsp_wmfwV2_err_adsp2_16bit",
+ .init = cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2_init,
+ .exit = cs_dsp_wmfw_err_test_exit,
+ .test_cases = cs_dsp_wmfw_err_test_cases_v2,
+};
+
+kunit_test_suites(&cs_dsp_wmfw_err_test_halo,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_32bit_wmfw2,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw0,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw1,
+ &cs_dsp_wmfw_err_test_adsp2_16bit_wmfw2);
diff --git a/drivers/firmware/cirrus/test/cs_dsp_tests.c b/drivers/firmware/cirrus/test/cs_dsp_tests.c
new file mode 100644
index 000000000000..7b829a03ca52
--- /dev/null
+++ b/drivers/firmware/cirrus/test/cs_dsp_tests.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Utility module for cs_dsp KUnit testing.
+//
+// Copyright (C) 2024 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("KUnit tests for Cirrus Logic DSP driver");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("FW_CS_DSP");
+MODULE_IMPORT_NS("FW_CS_DSP_KUNIT_TEST_UTILS");
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 477d3f32d99a..907cd149c40a 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -25,7 +25,6 @@ config IMX_SCU
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
- depends on IMX_SCMI_MISC_EXT || COMPILE_TEST
default y if ARCH_MXC
help
The System Controller Management Interface firmware (SCMI FW) is
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 38a03698cec9..e194f7acb2a9 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -402,10 +402,10 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
return -EIO;
/*
- * Bit 5 of byte 1 is "UL_Auto Update" & if it is set, Auto Update is
+ * Bit 5 of byte 1 is "UL_IAP" & if it is set, Auto Update is
* not possible.
*/
- if (response_msg[1] & AUTO_UPDATE_FEATURE_ENABLED)
+ if ((((u8 *)response_msg)[1] & AUTO_UPDATE_FEATURE_ENABLED))
return -EPERM;
return 0;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 5504721007cc..772fc7625639 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -99,6 +99,7 @@ config DRM_KUNIT_TEST
config DRM_KMS_HELPER
tristate
depends on DRM
+ select FB_CORE if DRM_FBDEV_EMULATION
help
CRTC helpers for KMS drivers.
@@ -358,6 +359,7 @@ config DRM_TTM_HELPER
tristate
depends on DRM
select DRM_TTM
+ select FB_CORE if DRM_FBDEV_EMULATION
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Helpers for ttm-based gem objects
@@ -365,6 +367,7 @@ config DRM_TTM_HELPER
config DRM_GEM_DMA_HELPER
tristate
depends on DRM
+ select FB_CORE if DRM_FBDEV_EMULATION
select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM DMA helper functions
@@ -372,6 +375,7 @@ config DRM_GEM_DMA_HELPER
config DRM_GEM_SHMEM_HELPER
tristate
depends on DRM && MMU
+ select FB_CORE if DRM_FBDEV_EMULATION
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM shmem helper functions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 946c48829f19..824f9da5b6ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -343,11 +343,10 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
coredump->skip_vram_check = skip_vram_check;
coredump->reset_vram_lost = vram_lost;
- if (job && job->vm) {
- struct amdgpu_vm *vm = job->vm;
+ if (job && job->pasid) {
struct amdgpu_task_info *ti;
- ti = amdgpu_vm_get_task_info_vm(vm);
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
if (ti) {
coredump->reset_task_info = *ti;
amdgpu_vm_put_task_info(ti);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d272d95dd5b2..cd4fac120834 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -417,6 +417,9 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ return false;
+
if (adev->has_pr3 ||
((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index b9d08bc96581..a21c510c408e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -255,7 +255,6 @@ void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct dma_fence *f;
unsigned i;
@@ -268,7 +267,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
f = NULL;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(ring->adev, &job->ibs[i], f);
+ amdgpu_ib_free(NULL, &job->ibs[i], f);
}
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ddd7f05e4db9..c9c48b782ec1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1266,10 +1266,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
* next command submission.
*/
if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
- uint32_t mem_type = bo->tbo.resource->mem_type;
-
- if (!(bo->preferred_domains &
- amdgpu_mem_type_to_domain(mem_type)))
+ if (bo->tbo.resource &&
+ !(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
amdgpu_vm_bo_evicted(&bo_va->base);
else
amdgpu_vm_bo_idle(&bo_va->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index fe7c48f2fb2a..da327ab48a57 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -4123,7 +4123,7 @@ static int gfx_v12_0_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->ip_versions[GC_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
gfx_v12_0_update_gfx_clock_gating(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
index 0fbc3be81f14..f2ab5001b492 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c
@@ -108,7 +108,7 @@ mmhub_v4_1_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
dev_err(adev->dev,
"MMVM_L2_PROTECTION_FAULT_STATUS_LO32:0x%08X\n",
status);
- switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
mmhub_cid = mmhub_client_ids_v4_1_0[cid][rw];
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index b1b57dcc5a73..d1032e9992b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -271,8 +271,19 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
+#define regRCC_DEV0_EPF6_STRAP4 0xd304
+#define regRCC_DEV0_EPF6_STRAP4_BASE_IDX 5
+
static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
+ uint32_t data;
+
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ case IP_VERSION(2, 5, 0):
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF6_STRAP4) & ~BIT(23);
+ WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF6_STRAP4, data);
+ break;
+ }
}
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
index 814ab59fdd4a..41421da63a08 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
@@ -275,7 +275,7 @@ static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
- switch (adev->ip_versions[NBIO_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(7, 11, 0):
case IP_VERSION(7, 11, 1):
case IP_VERSION(7, 11, 2):
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index 1ac730328516..3fb6d2aa7e3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -247,7 +247,7 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
- switch (adev->ip_versions[NBIO_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
case IP_VERSION(7, 7, 0):
data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 6a565ce74d5b..5cad09c5f2ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -2096,7 +2096,7 @@ static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2))
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2))
return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
FEATURE_PWR_GFX, NULL);
else
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 61f4a38e7d2b..8f786592143b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
ADV7511_AUDIO_CFG3_LEN_MASK, len);
regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
- regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ /* send current Audio infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
+
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), 0);
return 0;
}
@@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data)
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
BIT(7) | BIT(6), BIT(7));
/* use Audio infoframe updated info */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
BIT(5), 0);
+
/* enable SPDIF receiver */
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index eb5919b38263..a13b3d8ab6ac 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1241,8 +1241,10 @@ static int adv7511_probe(struct i2c_client *i2c)
return ret;
ret = adv7511_init_regulators(adv7511);
- if (ret)
- return dev_err_probe(dev, ret, "failed to init regulators\n");
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init regulators\n");
+ goto err_of_node_put;
+ }
/*
* The power down GPIO is optional. If present, toggle it from active to
@@ -1363,6 +1365,8 @@ err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
uninit_regulators:
adv7511_uninit_regulators(adv7511);
+err_of_node_put:
+ of_node_put(adv7511->host_node);
return ret;
}
@@ -1371,6 +1375,8 @@ static void adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ of_node_put(adv7511->host_node);
+
adv7511_uninit_regulators(adv7511);
drm_bridge_remove(&adv7511->bridge);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 4481489aaf5e..122ad91e8a32 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -172,7 +172,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
- if (num_lanes < 1 || num_lanes > 4)
+ if (num_lanes < 2 || num_lanes > 4)
return -EINVAL;
adv->num_dsi_lanes = num_lanes;
@@ -181,8 +181,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
if (!adv->host_node)
return -ENODEV;
- of_node_put(adv->host_node);
-
adv->use_timing_gen = !of_property_read_bool(np,
"adi,disable-timing-generator");
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 48b2df120086..90fe07a89260 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -1896,8 +1896,8 @@ static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
*
* Creates a DP tunnel manager for @dev.
*
- * Returns a pointer to the tunnel manager if created successfully or NULL in
- * case of an error.
+ * Returns a pointer to the tunnel manager if created successfully or error
+ * pointer in case of failure.
*/
struct drm_dp_tunnel_mgr *
drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
@@ -1907,7 +1907,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mgr->dev = dev;
init_waitqueue_head(&mgr->bw_req_queue);
@@ -1916,7 +1916,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
if (!mgr->groups) {
kfree(mgr);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
@@ -1927,7 +1927,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
if (!init_group(mgr, &mgr->groups[i])) {
destroy_mgr(mgr);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
mgr->group_count++;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6ba167a33461..71573b85d924 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1287,14 +1287,11 @@ EXPORT_SYMBOL(drm_mode_set_name);
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
- unsigned int num, den;
+ unsigned int num = 1, den = 1;
if (mode->htotal == 0 || mode->vtotal == 0)
return 0;
- num = mode->clock;
- den = mode->htotal * mode->vtotal;
-
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
num *= 2;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -1302,6 +1299,12 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode)
if (mode->vscan > 1)
den *= mode->vscan;
+ if (check_mul_overflow(mode->clock, num, &num))
+ return 0;
+
+ if (check_mul_overflow(mode->htotal * mode->vtotal, den, &den))
+ return 0;
+
return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den);
}
EXPORT_SYMBOL(drm_mode_vrefresh);
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 71dc659228ab..0c7aee13495a 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -2115,14 +2115,6 @@ static void intel_c10_pll_program(struct intel_display *display,
0, C10_VDR_CTRL_MSGBUS_ACCESS,
MB_WRITE_COMMITTED);
- /* Custom width needs to be programmed to 0 for both the phy lanes */
- intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
- C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
- MB_WRITE_COMMITTED);
- intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
- 0, C10_VDR_CTRL_UPDATE_CFG,
- MB_WRITE_COMMITTED);
-
/* Program the pll values only for the master lane */
for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
@@ -2132,6 +2124,10 @@ static void intel_c10_pll_program(struct intel_display *display,
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+ /* Custom width needs to be programmed to 0 for both the phy lanes */
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+ C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
+ MB_WRITE_COMMITTED);
intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
MB_WRITE_COMMITTED);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index ba55c059063d..fe1f85e5dda3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -343,6 +343,11 @@ struct intel_engine_guc_stats {
* @start_gt_clk: GT clock time of last idle to active transition.
*/
u64 start_gt_clk;
+
+ /**
+ * @total: The last value of total returned
+ */
+ u64 total;
};
union intel_engine_tlb_inv_reg {
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index c864d101faf9..9378d5901c49 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -133,7 +133,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
- if (GRAPHICS_VER(gt->i915) >= 12) {
+ if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
for (i = 0; i < I915_MAX_VCS; i++)
if (HAS_ENGINE(gt, _VCS(i)))
pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9ede6f240d79..c0bd730383f2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1243,6 +1243,21 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine,
} while (++i < 6);
}
+static void __set_engine_usage_record(struct intel_engine_cs *engine,
+ u32 last_in, u32 id, u32 total)
+{
+ struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
+
+#define record_write(map_, field_, val_) \
+ iosys_map_wr_field(map_, 0, struct guc_engine_usage_record, field_, val_)
+
+ record_write(&rec_map, last_switch_in_stamp, last_in);
+ record_write(&rec_map, current_context_index, id);
+ record_write(&rec_map, total_runtime, total);
+
+#undef record_write
+}
+
static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
@@ -1363,9 +1378,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
total += intel_gt_clock_interval_to_ns(gt, clk);
}
+ if (total > stats->total)
+ stats->total = total;
+
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
- return ns_to_ktime(total);
+ return ns_to_ktime(stats->total);
}
static void guc_enable_busyness_worker(struct intel_guc *guc)
@@ -1431,8 +1449,21 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id) {
+ struct intel_engine_guc_stats *stats = &engine->stats.guc;
+
guc_update_engine_gt_clks(engine);
- engine->stats.guc.prev_total = 0;
+
+ /*
+ * If resetting a running context, accumulate the active
+ * time as well since there will be no context switch.
+ */
+ if (stats->running) {
+ u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
+
+ stats->total_gt_clks += clk;
+ }
+ stats->prev_total = 0;
+ stats->running = 0;
}
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
@@ -1543,6 +1574,9 @@ err_trylock:
static int guc_action_enable_usage_stats(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 offset = intel_guc_engine_usage_offset(guc);
u32 action[] = {
INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
@@ -1550,6 +1584,9 @@ static int guc_action_enable_usage_stats(struct intel_guc *guc)
0,
};
+ for_each_engine(engine, gt, id)
+ __set_engine_usage_record(engine, 0, 0xffffffff, 0);
+
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c
index 8b48bba18131..3644a7544b93 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83102.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c
@@ -565,6 +565,8 @@ static int hx83102_get_modes(struct drm_panel *panel,
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode)
+ return -ENOMEM;
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index b036208f9356..08b22b592ab0 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -481,9 +481,9 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
- if (!nt->dsi[1]) {
+ if (IS_ERR(nt->dsi[1])) {
dev_err(dev, "Cannot get secondary DSI node\n");
- return -ENODEV;
+ return PTR_ERR(nt->dsi[1]);
}
num_dsis++;
}
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index eef03d04e0cd..1f72ef7ca74c 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -1177,6 +1177,7 @@ static int st7701_probe(struct device *dev, int connector_type)
return dev_err_probe(dev, ret, "Failed to get orientation\n");
drm_panel_init(&st7701->panel, dev, &st7701_funcs, connector_type);
+ st7701->panel.prepare_prev_first = true;
/**
* Once sleep out has been issued, ST7701 IC required to wait 120ms
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
index 169c629746c7..17349825543f 100644
--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -325,7 +325,7 @@ static void r63353_panel_shutdown(struct mipi_dsi_device *dsi)
{
struct r63353_panel *rpanel = mipi_dsi_get_drvdata(dsi);
- r63353_panel_unprepare(&rpanel->base);
+ drm_panel_unprepare(&rpanel->base);
}
static const struct r63353_desc sharp_ls068b3sx02_data = {
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 7ce25281c74c..57da84908752 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1355,7 +1355,8 @@ EXPORT_SYMBOL(drm_sched_init);
* drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
* will not be called for all jobs still in drm_gpu_scheduler.pending_list.
* There is no solution for this currently. Thus, it is up to the driver to make
- * sure that
+ * sure that:
+ *
* a) drm_sched_fini() is only called after for all submitted jobs
* drm_sched_backend_ops.free_job() has been called or that
* b) the jobs for which drm_sched_backend_ops.free_job() has not been called
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index ae6b337cdc54..f61a8ef38094 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -724,7 +724,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
new_mem->mem_type == XE_PL_SYSTEM) {
long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP,
- true,
+ false,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
ret = timeout;
@@ -848,8 +848,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
out:
if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
- ttm_bo->ttm)
+ ttm_bo->ttm) {
+ long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ ret = timeout;
+
xe_tt_unmap_sg(ttm_bo->ttm);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index f8947e7e917e..21a50d539426 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -109,7 +109,11 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_puts(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(ss->guc.ct, &p);
- drm_puts(&p, "\n**** Contexts ****\n");
+ /*
+ * Don't add a new section header here because the mesa debug decoder
+ * tool expects the context information to be in the 'GuC CT' section.
+ */
+ /* drm_puts(&p, "\n**** Contexts ****\n"); */
xe_guc_exec_queue_snapshot_print(ss->ge, &p);
drm_puts(&p, "\n**** Job ****\n");
@@ -363,6 +367,15 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
char buff[ASCII85_BUFSZ], *line_buff;
size_t line_pos = 0;
+ /*
+ * Splitting blobs across multiple lines is not compatible with the mesa
+ * debug decoder tool. Note that even dropping the explicit '\n' below
+ * doesn't help because the GuC log is so big some underlying implementation
+ * still splits the lines at 512K characters. So just bail completely for
+ * the moment.
+ */
+ return;
+
#define DMESG_MAX_LINE_LEN 800
#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index fd0f3b3c9101..268cd3123be9 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -8,6 +8,7 @@
#include <linux/nospec.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>
@@ -762,9 +763,11 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
*/
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
+ struct xe_device *xe = gt_to_xe(q->gt);
struct xe_file *xef;
struct xe_lrc *lrc;
u32 old_ts, new_ts;
+ int idx;
/*
* Jobs that are run during driver load may use an exec_queue, but are
@@ -774,6 +777,10 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
if (!q->vm || !q->vm->xef)
return;
+ /* Synchronize with unbind while holding the xe file open */
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return;
+
xef = q->vm->xef;
/*
@@ -787,6 +794,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
lrc = q->lrc[0];
new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+
+ drm_dev_exit(idx);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 192643d63d22..ca49860168f6 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -2046,7 +2046,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_any = valid_any || (valid_ggtt && is_primary);
if (IS_DGFX(xe)) {
- bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
+ bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
valid_any = valid_any || (valid_lmem && is_primary);
valid_all = valid_all && valid_lmem;
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 8dd55798ab31..5cc0f6f9bc11 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -74,12 +74,6 @@ struct xe_oa_config {
struct rcu_head rcu;
};
-struct flex {
- struct xe_reg reg;
- u32 offset;
- u32 value;
-};
-
struct xe_oa_open_param {
struct xe_file *xef;
u32 oa_unit_id;
@@ -596,19 +590,38 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
return ret;
}
+static void xe_oa_lock_vma(struct xe_exec_queue *q)
+{
+ if (q->vm) {
+ down_read(&q->vm->lock);
+ xe_vm_lock(q->vm, false);
+ }
+}
+
+static void xe_oa_unlock_vma(struct xe_exec_queue *q)
+{
+ if (q->vm) {
+ xe_vm_unlock(q->vm);
+ up_read(&q->vm->lock);
+ }
+}
+
static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps,
struct xe_bb *bb)
{
+ struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q;
struct xe_sched_job *job;
struct dma_fence *fence;
int err = 0;
- /* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
- job = xe_bb_create_job(stream->k_exec_q, bb);
+ xe_oa_lock_vma(q);
+
+ job = xe_bb_create_job(q, bb);
if (IS_ERR(job)) {
err = PTR_ERR(job);
goto exit;
}
+ job->ggtt = true;
if (deps == XE_OA_SUBMIT_ADD_DEPS) {
for (int i = 0; i < stream->num_syncs && !err; i++)
@@ -623,10 +636,13 @@ static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
+ xe_oa_unlock_vma(q);
+
return fence;
err_put_job:
xe_sched_job_put(job);
exit:
+ xe_oa_unlock_vma(q);
return ERR_PTR(err);
}
@@ -675,63 +691,19 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
dma_fence_put(stream->last_fence);
}
-static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
- struct xe_bb *bb, const struct flex *flex, u32 count)
-{
- u32 offset = xe_bo_ggtt_addr(lrc->bo);
-
- do {
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
- bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
- bb->cs[bb->len++] = 0;
- bb->cs[bb->len++] = flex->value;
-
- } while (flex++, --count);
-}
-
-static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
- const struct flex *flex, u32 count)
-{
- struct dma_fence *fence;
- struct xe_bb *bb;
- int err;
-
- bb = xe_bb_new(stream->gt, 4 * count, false);
- if (IS_ERR(bb)) {
- err = PTR_ERR(bb);
- goto exit;
- }
-
- xe_oa_store_flex(stream, lrc, bb, flex, count);
-
- fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto free_bb;
- }
- xe_bb_free(bb, fence);
- dma_fence_put(fence);
-
- return 0;
-free_bb:
- xe_bb_free(bb, NULL);
-exit:
- return err;
-}
-
-static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
+static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
{
struct dma_fence *fence;
struct xe_bb *bb;
int err;
- bb = xe_bb_new(stream->gt, 3, false);
+ bb = xe_bb_new(stream->gt, 2 * count + 1, false);
if (IS_ERR(bb)) {
err = PTR_ERR(bb);
goto exit;
}
- write_cs_mi_lri(bb, reg_lri, 1);
+ write_cs_mi_lri(bb, reg_lri, count);
fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
if (IS_ERR(fence)) {
@@ -751,71 +723,55 @@ exit:
static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
- struct flex regs_context[] = {
+ struct xe_oa_reg reg_lri[] = {
{
OACTXCONTROL(stream->hwe->mmio_base),
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
enable ? OA_COUNTER_RESUME : 0,
},
{
+ OAR_OACONTROL,
+ oacontrol,
+ },
+ {
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- regs_offset + CTX_CONTEXT_CONTROL,
- _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
+ _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+ enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
},
};
- struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
- int err;
-
- /* Modify stream hwe context image with regs_context */
- err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
- regs_context, ARRAY_SIZE(regs_context));
- if (err)
- return err;
- /* Apply reg_lri using LRI */
- return xe_oa_load_with_lri(stream, &reg_lri);
+ return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
}
static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
- struct flex regs_context[] = {
+ struct xe_oa_reg reg_lri[] = {
{
OACTXCONTROL(stream->hwe->mmio_base),
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
enable ? OA_COUNTER_RESUME : 0,
},
{
+ OAC_OACONTROL,
+ oacontrol
+ },
+ {
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- regs_offset + CTX_CONTEXT_CONTROL,
- _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
+ _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+ enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
},
};
- struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
- int err;
/* Set ccs select to enable programming of OAC_OACONTROL */
xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl,
__oa_ccs_select(stream));
- /* Modify stream hwe context image with regs_context */
- err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
- regs_context, ARRAY_SIZE(regs_context));
- if (err)
- return err;
-
- /* Apply reg_lri using LRI */
- return xe_oa_load_with_lri(stream, &reg_lri);
+ return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
}
static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
@@ -2066,8 +2022,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
return -ENOENT;
- if (param.exec_q->width > 1)
- drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n");
+ if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1))
+ return -EOPNOTSUPP;
}
/*
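The RING_CONTEXT_CONTROL values above use the masked-register convention: the upper 16 bits of the written value are a per-bit write-enable mask and the lower 16 bits carry the new bit values, so _MASKED_FIELD(bit, enable ? bit : 0) can set or clear a single bit without disturbing the rest of the register. A rough, self-contained illustration with a locally defined macro (the driver uses its own _MASKED_FIELD helper):

#include <linux/types.h>

/* Illustrative only; mirrors the usual i915/xe masked-register layout. */
#define MY_MASKED_FIELD(mask, value)	(((u32)(mask) << 16) | (u32)(value))

static u32 ctx_ctrl_value(u32 ctx_enable_bit, bool enable)
{
	/* Only ctx_enable_bit may change; every other bit keeps its value. */
	return MY_MASKED_FIELD(ctx_enable_bit, enable ? ctx_enable_bit : 0);
}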
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 0be4f489d3e1..9f327f27c072 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -221,7 +221,10 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
static u32 get_ppgtt_flag(struct xe_sched_job *job)
{
- return job->q->vm ? BIT(8) : 0;
+ if (job->q->vm && !job->ggtt)
+ return BIT(8);
+
+ return 0;
}
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index f13f333f00be..d942b20a9f29 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -56,6 +56,8 @@ struct xe_sched_job {
u32 migrate_flush_flags;
/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
bool ring_ops_flush_tlb;
+ /** @ggtt: mapped in ggtt. */
+ bool ggtt;
/** @ptrs: per instance pointers. */
struct xe_job_ptrs ptrs[];
};
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index c38dcdfcb914..a99112e6f0b8 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -756,7 +756,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
* adding succeeded, it is ok to proceed even if the memory was
* not onlined in time.
*/
- wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
+ wait_for_completion_timeout(&dm_device.ol_waitevent, secs_to_jiffies(5));
post_status(&dm_device);
}
}
@@ -1373,7 +1373,8 @@ static int dm_thread_func(void *dm_dev)
struct hv_dynmem_device *dm = dm_dev;
while (!kthread_should_stop()) {
- wait_for_completion_interruptible_timeout(&dm_device.config_event, 1 * HZ);
+ wait_for_completion_interruptible_timeout(&dm_device.config_event,
+ secs_to_jiffies(1));
/*
* The host expects us to post information on the memory
* pressure every second.
@@ -1748,7 +1749,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
if (ret)
goto out;
- t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
+ t = wait_for_completion_timeout(&dm_device.host_event, secs_to_jiffies(5));
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
@@ -1806,7 +1807,7 @@ static int balloon_connect_vsp(struct hv_device *dev)
if (ret)
goto out;
- t = wait_for_completion_timeout(&dm_device.host_event, 5 * HZ);
+ t = wait_for_completion_timeout(&dm_device.host_event, secs_to_jiffies(5));
if (t == 0) {
ret = -ETIMEDOUT;
goto out;
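The Hyper-V timeout changes in this series all follow one pattern: timeouts written as N * HZ become secs_to_jiffies(N), which states the unit explicitly and avoids open-coded HZ arithmetic. A minimal sketch of the before/after shape, using a hypothetical completion rather than the driver's own:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_for_host_reply(struct completion *done)
{
	/* Before: wait_for_completion_timeout(done, 5 * HZ); */
	if (!wait_for_completion_timeout(done, secs_to_jiffies(5)))
		return -ETIMEDOUT;

	return 0;
}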
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index d35b60c06114..7400a5a4d2bd 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -655,7 +655,7 @@ void hv_kvp_onchannelcallback(void *context)
if (host_negotiatied == NEGO_NOT_STARTED) {
host_negotiatied = NEGO_IN_PROGRESS;
schedule_delayed_work(&kvp_host_handshake_work,
- HV_UTIL_NEGO_TIMEOUT * HZ);
+ secs_to_jiffies(HV_UTIL_NEGO_TIMEOUT));
}
return;
}
@@ -724,7 +724,7 @@ void hv_kvp_onchannelcallback(void *context)
*/
schedule_work(&kvp_sendkey_work);
schedule_delayed_work(&kvp_timeout_work,
- HV_UTIL_TIMEOUT * HZ);
+ secs_to_jiffies(HV_UTIL_TIMEOUT));
return;
@@ -767,6 +767,12 @@ hv_kvp_init(struct hv_util_service *srv)
*/
kvp_transaction.state = HVUTIL_DEVICE_INIT;
+ return 0;
+}
+
+int
+hv_kvp_init_transport(void)
+{
hvt = hvutil_transport_init(kvp_devname, CN_KVP_IDX, CN_KVP_VAL,
kvp_on_msg, kvp_on_reset);
if (!hvt)
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 0d2184be1691..bde637a96c37 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -193,7 +193,8 @@ static void vss_send_op(void)
vss_transaction.state = HVUTIL_USERSPACE_REQ;
schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
- VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
+ secs_to_jiffies(VSS_FREEZE_TIMEOUT) :
+ secs_to_jiffies(HV_UTIL_TIMEOUT));
rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
if (rc) {
@@ -388,6 +389,12 @@ hv_vss_init(struct hv_util_service *srv)
*/
vss_transaction.state = HVUTIL_DEVICE_INIT;
+ return 0;
+}
+
+int
+hv_vss_init_transport(void)
+{
hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
vss_on_msg, vss_on_reset);
if (!hvt) {
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index c4f525325790..36ee89c0358b 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -141,6 +141,7 @@ static struct hv_util_service util_heartbeat = {
static struct hv_util_service util_kvp = {
.util_cb = hv_kvp_onchannelcallback,
.util_init = hv_kvp_init,
+ .util_init_transport = hv_kvp_init_transport,
.util_pre_suspend = hv_kvp_pre_suspend,
.util_pre_resume = hv_kvp_pre_resume,
.util_deinit = hv_kvp_deinit,
@@ -149,6 +150,7 @@ static struct hv_util_service util_kvp = {
static struct hv_util_service util_vss = {
.util_cb = hv_vss_onchannelcallback,
.util_init = hv_vss_init,
+ .util_init_transport = hv_vss_init_transport,
.util_pre_suspend = hv_vss_pre_suspend,
.util_pre_resume = hv_vss_pre_resume,
.util_deinit = hv_vss_deinit,
@@ -590,10 +592,8 @@ static int util_probe(struct hv_device *dev,
srv->channel = dev->channel;
if (srv->util_init) {
ret = srv->util_init(srv);
- if (ret) {
- ret = -ENODEV;
+ if (ret)
goto error1;
- }
}
/*
@@ -613,6 +613,13 @@ static int util_probe(struct hv_device *dev,
if (ret)
goto error;
+ if (srv->util_init_transport) {
+ ret = srv->util_init_transport();
+ if (ret) {
+ vmbus_close(dev->channel);
+ goto error;
+ }
+ }
return 0;
error:
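The util_init/util_init_transport split above defers creating the hvutil userspace transport until after the VMBus channel has been opened, so a userspace write can no longer reach the message callbacks before the channel and its ring buffer exist. A hypothetical probe skeleton showing the ordering (all names here are illustrative, not the hv_util API):

/* Hypothetical service descriptor mirroring the two init phases. */
struct demo_util_service {
	int  (*util_init)(struct demo_util_service *srv); /* internal state */
	int  (*util_init_transport)(void);  /* userspace-facing transport */
	void (*util_deinit)(void);
};

static int demo_util_probe(struct demo_util_service *srv,
			   int (*open_channel)(void),
			   void (*close_channel)(void))
{
	int ret;

	ret = srv->util_init(srv);
	if (ret)
		return ret;

	/* Open the channel first; only then expose anything userspace can poke. */
	ret = open_channel();
	if (ret)
		goto err_deinit;

	if (srv->util_init_transport) {
		ret = srv->util_init_transport();
		if (ret)
			goto err_close;
	}

	return 0;

err_close:
	close_channel();
err_deinit:
	srv->util_deinit();
	return ret;
}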
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index d2856023d53c..52cb744b4d7f 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -370,12 +370,14 @@ void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);
int hv_kvp_init(struct hv_util_service *srv);
+int hv_kvp_init_transport(void);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);
int hv_vss_init(struct hv_util_service *srv);
+int hv_vss_init_transport(void);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 6d89d37b069a..2892b8da20a5 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2507,7 +2507,7 @@ static int vmbus_bus_resume(struct device *dev)
vmbus_request_offers();
if (wait_for_completion_timeout(
- &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
+ &vmbus_connection.ready_for_resume_event, secs_to_jiffies(10)) == 0)
pr_err("Some vmbus device is missing after suspending?\n");
/* Reset the event for the next suspend. */
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 926d28cd3fab..1c2cb12071b8 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -182,7 +182,7 @@ struct tmp51x_data {
struct regmap *regmap;
};
-// Set the shift based on the gain 8=4, 4=3, 2=2, 1=1
+// Set the shift based on the gain: 8 -> 1, 4 -> 2, 2 -> 3, 1 -> 4
static inline u8 tmp51x_get_pga_shift(struct tmp51x_data *data)
{
return 5 - ffs(data->pga_gain);
@@ -204,7 +204,9 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
* 2's complement number shifted by one to four depending
* on the pga gain setting. 1lsb = 10uV
*/
- *val = sign_extend32(regval, 17 - tmp51x_get_pga_shift(data));
+ *val = sign_extend32(regval,
+ reg == TMP51X_SHUNT_CURRENT_RESULT ?
+ 16 - tmp51x_get_pga_shift(data) : 15);
*val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
break;
case TMP51X_BUS_VOLTAGE_RESULT:
@@ -220,7 +222,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
break;
case TMP51X_BUS_CURRENT_RESULT:
// Current = (ShuntVoltage * CalibrationRegister) / 4096
- *val = sign_extend32(regval, 16) * data->curr_lsb_ua;
+ *val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua;
*val = DIV_ROUND_CLOSEST(*val, MILLI);
break;
case TMP51X_LOCAL_TEMP_RESULT:
@@ -232,7 +234,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
case TMP51X_REMOTE_TEMP_LIMIT_2:
case TMP513_REMOTE_TEMP_LIMIT_3:
// 1lsb = 0.0625 degrees centigrade
- *val = sign_extend32(regval, 16) >> TMP51X_TEMP_SHIFT;
+ *val = sign_extend32(regval, 15) >> TMP51X_TEMP_SHIFT;
*val = DIV_ROUND_CLOSEST(*val * 625, 10);
break;
case TMP51X_N_FACTOR_AND_HYST_1:
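The tmp513 changes hinge on sign_extend32()'s second argument being the zero-based index of the sign bit: a plain 16-bit two's-complement register must be extended from bit 15 (and the gain-shifted shunt result from 16 - shift rather than 17 - shift), otherwise negative readings stay positive. A small standalone illustration:

#include <linux/bitops.h>
#include <linux/types.h>

/*
 * Bit 15 is the sign bit of a 16-bit two's-complement register value.
 * With index 16 (as in the old code) a negative reading such as 0xFFFF
 * is returned as 65535 instead of -1.
 */
static s32 reg16_to_signed(u32 regval)
{
	return sign_extend32(regval, 15);
}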
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index f751d231ded8..5c9a8dfbc4a0 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -335,6 +335,7 @@ static const struct of_device_id i2c_imx_dt_ids[] = {
{ .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx7d-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
@@ -532,22 +533,20 @@ static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx)
static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy, bool atomic)
{
+ bool multi_master = i2c_imx->multi_master;
unsigned long orig_jiffies = jiffies;
unsigned int temp;
- if (!i2c_imx->multi_master)
- return 0;
-
while (1) {
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
/* check for arbitration lost */
- if (temp & I2SR_IAL) {
+ if (multi_master && (temp & I2SR_IAL)) {
i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
return -EAGAIN;
}
- if (for_busy && (temp & I2SR_IBB)) {
+ if (for_busy && (!multi_master || (temp & I2SR_IBB))) {
i2c_imx->stopped = 0;
break;
}
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index d1543e7d8380..5db73429125c 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -93,27 +93,35 @@
* @base: pointer to register struct
* @dev: device reference
* @i2c_clk: clock reference for i2c input clock
+ * @msg_queue: pointer to the list of messages to be transferred
* @buf: pointer to msg buffer for easier use
* @msg_complete: xfer completion object
* @adapter: core i2c abstraction
* @msg_err: error code for completed message
* @bus_clk_rate: current i2c bus clock rate
* @isr_status: cached copy of local ISR status
+ * @total_num: total number of messages to be sent/received
+ * @current_num: index of the current message being sent/received
* @msg_len: number of bytes transferred in msg
* @addr: address of the current slave
+ * @restart_needed: whether or not a repeated start is required after the current message
*/
struct mchp_corei2c_dev {
void __iomem *base;
struct device *dev;
struct clk *i2c_clk;
+ struct i2c_msg *msg_queue;
u8 *buf;
struct completion msg_complete;
struct i2c_adapter adapter;
int msg_err;
+ int total_num;
+ int current_num;
u32 bus_clk_rate;
u32 isr_status;
u16 msg_len;
u8 addr;
+ bool restart_needed;
};
static void mchp_corei2c_core_disable(struct mchp_corei2c_dev *idev)
@@ -222,6 +230,47 @@ static int mchp_corei2c_fill_tx(struct mchp_corei2c_dev *idev)
return 0;
}
+static void mchp_corei2c_next_msg(struct mchp_corei2c_dev *idev)
+{
+ struct i2c_msg *this_msg;
+ u8 ctrl;
+
+ if (idev->current_num >= idev->total_num) {
+ complete(&idev->msg_complete);
+ return;
+ }
+
+ /*
+ * If there's been an error, the isr needs to return control
+ * to the "main" part of the driver, so as not to keep sending
+ * messages once it completes and clears the SI bit.
+ */
+ if (idev->msg_err) {
+ complete(&idev->msg_complete);
+ return;
+ }
+
+ this_msg = idev->msg_queue++;
+
+ if (idev->current_num < (idev->total_num - 1)) {
+ struct i2c_msg *next_msg = idev->msg_queue;
+
+ idev->restart_needed = next_msg->flags & I2C_M_RD;
+ } else {
+ idev->restart_needed = false;
+ }
+
+ idev->addr = i2c_8bit_addr_from_msg(this_msg);
+ idev->msg_len = this_msg->len;
+ idev->buf = this_msg->buf;
+
+ ctrl = readb(idev->base + CORE_I2C_CTRL);
+ ctrl |= CTRL_STA;
+ writeb(ctrl, idev->base + CORE_I2C_CTRL);
+
+ idev->current_num++;
+}
+
static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
{
u32 status = idev->isr_status;
@@ -238,8 +287,6 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
ctrl &= ~CTRL_STA;
writeb(idev->addr, idev->base + CORE_I2C_DATA);
writeb(ctrl, idev->base + CORE_I2C_CTRL);
- if (idev->msg_len == 0)
- finished = true;
break;
case STATUS_M_ARB_LOST:
idev->msg_err = -EAGAIN;
@@ -247,10 +294,14 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
break;
case STATUS_M_SLAW_ACK:
case STATUS_M_TX_DATA_ACK:
- if (idev->msg_len > 0)
+ if (idev->msg_len > 0) {
mchp_corei2c_fill_tx(idev);
- else
- last_byte = true;
+ } else {
+ if (idev->restart_needed)
+ finished = true;
+ else
+ last_byte = true;
+ }
break;
case STATUS_M_TX_DATA_NACK:
case STATUS_M_SLAR_NACK:
@@ -287,7 +338,7 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev)
mchp_corei2c_stop(idev);
if (last_byte || finished)
- complete(&idev->msg_complete);
+ mchp_corei2c_next_msg(idev);
return IRQ_HANDLED;
}
@@ -311,21 +362,48 @@ static irqreturn_t mchp_corei2c_isr(int irq, void *_dev)
return ret;
}
-static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
- struct i2c_msg *msg)
+static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
{
- u8 ctrl;
+ struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
+ struct i2c_msg *this_msg = msgs;
unsigned long time_left;
+ u8 ctrl;
+
+ mchp_corei2c_core_enable(idev);
+
+ /*
+ * The isr controls the flow of a transfer, so this queue information
+ * needs to be saved somewhere the isr can access it.
+ */
+ idev->restart_needed = false;
+ idev->msg_queue = msgs;
+ idev->total_num = num;
+ idev->current_num = 0;
- idev->addr = i2c_8bit_addr_from_msg(msg);
- idev->msg_len = msg->len;
- idev->buf = msg->buf;
+ /*
+ * But the first entry to the isr is triggered by the start in this
+ * function, so the first message needs to be "dequeued".
+ */
+ idev->addr = i2c_8bit_addr_from_msg(this_msg);
+ idev->msg_len = this_msg->len;
+ idev->buf = this_msg->buf;
idev->msg_err = 0;
- reinit_completion(&idev->msg_complete);
+ if (idev->total_num > 1) {
+ struct i2c_msg *next_msg = msgs + 1;
- mchp_corei2c_core_enable(idev);
+ idev->restart_needed = next_msg->flags & I2C_M_RD;
+ }
+ idev->current_num++;
+ idev->msg_queue++;
+
+ reinit_completion(&idev->msg_complete);
+
+ /*
+ * Send the first start to pass control to the isr
+ */
ctrl = readb(idev->base + CORE_I2C_CTRL);
ctrl |= CTRL_STA;
writeb(ctrl, idev->base + CORE_I2C_CTRL);
@@ -335,20 +413,8 @@ static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev,
if (!time_left)
return -ETIMEDOUT;
- return idev->msg_err;
-}
-
-static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap);
- int i, ret;
-
- for (i = 0; i < num; i++) {
- ret = mchp_corei2c_xfer_msg(idev, msgs++);
- if (ret)
- return ret;
- }
+ if (idev->msg_err)
+ return idev->msg_err;
return num;
}
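The restart_needed logic above exists because a combined write-then-read transaction must be joined by a repeated START rather than a STOP, otherwise another master could grab the bus between the two halves. From a client driver's point of view the queue walked by the new xfer routine is just an i2c_msg array; a typical register read is submitted like this (hypothetical client, standard i2c core API):

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/kernel.h>

static int my_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[] = {
		{	/* message 0: write the register address */
			.addr = client->addr,
			.len = 1,
			.buf = &reg,
		},
		{	/* message 1: read one byte after a repeated START */
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = val,
		},
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}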
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 64ace0b968f0..91db10515d74 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -690,6 +690,7 @@ cma_validate_port(struct ib_device *device, u32 port,
int bound_if_index = dev_addr->bound_dev_if;
int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
+ struct net_device *pdev = NULL;
if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
goto out;
@@ -714,6 +715,21 @@ cma_validate_port(struct ib_device *device, u32 port,
rcu_read_lock();
ndev = rcu_dereference(sgid_attr->ndev);
+ if (ndev->ifindex != bound_if_index) {
+ pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
+ if (pdev) {
+ if (is_vlan_dev(pdev)) {
+ pdev = vlan_dev_real_dev(pdev);
+ if (ndev->ifindex == pdev->ifindex)
+ bound_if_index = pdev->ifindex;
+ }
+ if (is_vlan_dev(ndev)) {
+ pdev = vlan_dev_real_dev(ndev);
+ if (bound_if_index == pdev->ifindex)
+ bound_if_index = ndev->ifindex;
+ }
+ }
+ }
if (!net_eq(dev_net(ndev), dev_addr->net) ||
ndev->ifindex != bound_if_index) {
rdma_put_gid_attr(sgid_attr);
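The cma_validate_port() hunk accepts a GID entry whose netdev and the bound interface differ only by a VLAN layer, resolving through vlan_dev_real_dev(). The helpers are the standard <linux/if_vlan.h> API; a minimal sketch of the equivalence being applied (names here are illustrative):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Return the underlying real device when @dev is a VLAN, else @dev itself. */
static struct net_device *lower_dev(struct net_device *dev)
{
	return is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev;
}

/* Two interfaces are treated as matching when they share a real device. */
static bool same_real_dev(struct net_device *a, struct net_device *b)
{
	return lower_dev(a)->ifindex == lower_dev(b)->ifindex;
}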
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index ff121e59b9c0..cb987ab0177c 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -2833,8 +2833,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
enum rdma_nl_notify_event_type type)
{
struct sk_buff *skb;
+ int ret = -EMSGSIZE;
struct net *net;
- int ret = 0;
void *nlh;
net = read_pnet(&device->coredev.rdma_net);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 66b02fbf077a..5ad14c39d48c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
{
const void __user *res = iter->cur;
- if (iter->cur + len > iter->end)
+ if (len > iter->end - iter->cur)
return (void __force __user *)ERR_PTR(-ENOSPC);
iter->cur += len;
return res;
@@ -2008,11 +2008,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
if (ret)
return ret;
- wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
+ wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
+ cmd.wr_count));
if (IS_ERR(wqes))
return PTR_ERR(wqes);
- sgls = uverbs_request_next_ptr(
- &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
+ sgls = uverbs_request_next_ptr(&iter,
+ size_mul(cmd.sge_count,
+ sizeof(struct ib_uverbs_sge)));
if (IS_ERR(sgls))
return PTR_ERR(sgls);
ret = uverbs_request_finish(&iter);
@@ -2198,11 +2200,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
return ERR_PTR(-EINVAL);
- wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
+ wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
if (IS_ERR(wqes))
return ERR_CAST(wqes);
- sgls = uverbs_request_next_ptr(
- iter, sge_count * sizeof(struct ib_uverbs_sge));
+ sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
+ sizeof(struct ib_uverbs_sge)));
if (IS_ERR(sgls))
return ERR_CAST(sgls);
ret = uverbs_request_finish(iter);
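Both uverbs changes are overflow hardening: iter->cur + len can wrap for a huge user-supplied length, so the bound is checked against the remaining space instead, and the wqe_size * wr_count products go through size_mul(), which saturates at SIZE_MAX rather than wrapping. A stripped-down sketch of the same idea (hypothetical iterator type; size_mul() is the real helper from <linux/overflow.h>):

#include <linux/overflow.h>
#include <linux/types.h>

struct demo_req_iter {
	const char __user *cur;
	const char __user *end;
};

/* Reserve @len bytes from the iterator, or return NULL if they are not there. */
static const void __user *demo_iter_next(struct demo_req_iter *it, size_t len)
{
	const void __user *res = it->cur;

	/* Compare against the remaining space; no pointer arithmetic that
	 * could wrap past the end of the address space.
	 */
	if (len > it->end - it->cur)
		return NULL;

	it->cur += len;
	return res;
}

/* Callers compute @len as size_mul(wqe_size, wr_count), which saturates at
 * SIZE_MAX on overflow instead of silently wrapping.
 */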
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 82023394e330..e3d26bd6de05 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -199,7 +199,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
- ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
+ ib_attr->hw_ver = rdev->en_dev->pdev->revision;
ib_attr->max_qp = dev_attr->max_qp;
ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
ib_attr->device_cap_flags =
@@ -967,13 +967,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
unsigned int flags;
int rc;
+ bnxt_re_debug_rem_qpinfo(rdev, qp);
+
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
- if (rc) {
+ if (rc)
ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
- return rc;
- }
if (rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
@@ -983,11 +983,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
- rc = bnxt_re_destroy_gsi_sqp(qp);
- if (rc)
- return rc;
- }
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
+ bnxt_re_destroy_gsi_sqp(qp);
mutex_lock(&rdev->qp_lock);
list_del(&qp->list);
@@ -998,8 +995,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
atomic_dec(&rdev->stats.res.ud_qp_count);
- bnxt_re_debug_rem_qpinfo(rdev, qp);
-
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
@@ -2167,18 +2162,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
}
- if (qp_attr_mask & IB_QP_PATH_MTU) {
- qp->qplib_qp.modify_flags |=
- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
- qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
- qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
- } else if (qp_attr->qp_state == IB_QPS_RTR) {
- qp->qplib_qp.modify_flags |=
- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
- qp->qplib_qp.path_mtu =
- __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
- qp->qplib_qp.mtu =
- ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ if (qp_attr->qp_state == IB_QPS_RTR) {
+ enum ib_mtu qpmtu;
+
+ qpmtu = iboe_get_mtu(rdev->netdev->mtu);
+ if (qp_attr_mask & IB_QP_PATH_MTU) {
+ if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
+ ib_mtu_enum_to_int(qpmtu))
+ return -EINVAL;
+ qpmtu = qp_attr->path_mtu;
+ }
+
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+ qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
+ qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
}
if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -2328,6 +2325,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->retry_cnt = qplib_qp->retry_cnt;
qp_attr->rnr_retry = qplib_qp->rnr_retry;
qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+ qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
qp_attr->rq_psn = qplib_qp->rq.psn;
qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
qp_attr->sq_psn = qplib_qp->sq.psn;
@@ -2824,7 +2822,8 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
- bnxt_ud_qp_hw_stall_workaround(qp);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}
@@ -2936,7 +2935,8 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
- bnxt_ud_qp_hw_stall_workaround(qp);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index ac59f1d73b15..fbb16a411d6a 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -268,6 +268,10 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+static inline u32 __to_ib_port_num(u16 port_id)
+{
+ return (u32)port_id + 1;
+}
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index b7af0d5ff3b6..c143f273b759 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1715,11 +1715,8 @@ static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
- int mask = IB_QP_STATE;
- struct ib_qp_attr qp_attr;
struct bnxt_re_qp *qp;
- qp_attr.qp_state = IB_QPS_ERR;
mutex_lock(&rdev->qp_lock);
list_for_each_entry(qp, &rdev->qp_list, list) {
/* Modify the state of all QPs except QP1/Shadow QP */
@@ -1727,12 +1724,9 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
if (qp->qplib_qp.state !=
CMDQ_MODIFY_QP_NEW_STATE_RESET &&
qp->qplib_qp.state !=
- CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ CMDQ_MODIFY_QP_NEW_STATE_ERR)
bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
1, IB_EVENT_QP_FATAL);
- bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
- NULL);
- }
}
}
mutex_unlock(&rdev->qp_lock);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index e42abf5be6c0..5336f74297f8 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -659,13 +659,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
return rc;
-
- srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
- GFP_KERNEL);
- if (!srq->swq) {
- rc = -ENOMEM;
- goto fail;
- }
srq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_SRQ,
@@ -694,9 +687,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
spin_lock_init(&srq->lock);
srq->start_idx = 0;
srq->last_idx = srq->hwq.max_elements - 1;
- for (idx = 0; idx < srq->hwq.max_elements; idx++)
- srq->swq[idx].next_idx = idx + 1;
- srq->swq[srq->last_idx].next_idx = -1;
+ if (!srq->hwq.is_user) {
+ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+ GFP_KERNEL);
+ if (!srq->swq) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ for (idx = 0; idx < srq->hwq.max_elements; idx++)
+ srq->swq[idx].next_idx = idx + 1;
+ srq->swq[srq->last_idx].next_idx = -1;
+ }
srq->id = le32_to_cpu(resp.xid);
srq->dbinfo.hwq = &srq->hwq;
@@ -1000,9 +1001,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u32 tbl_indx;
u16 nsge;
- if (res->dattr)
- qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
-
+ qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
sq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_QP,
@@ -1034,7 +1033,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
: 0;
/* Update msn tbl size */
if (qp->is_host_msn_tbl && psn_sz) {
- hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ hwq_attr.aux_depth =
+ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ else
+ hwq_attr.aux_depth =
+ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
qp->msn_tbl_sz = hwq_attr.aux_depth;
qp->msn = 0;
}
@@ -1044,13 +1048,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (rc)
return rc;
- rc = bnxt_qplib_alloc_init_swq(sq);
- if (rc)
- goto fail_sq;
-
- if (psn_sz)
- bnxt_qplib_init_psn_ptr(qp, psn_sz);
+ if (!sq->hwq.is_user) {
+ rc = bnxt_qplib_alloc_init_swq(sq);
+ if (rc)
+ goto fail_sq;
+ if (psn_sz)
+ bnxt_qplib_init_psn_ptr(qp, psn_sz);
+ }
req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@@ -1076,9 +1081,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto sq_swq;
- rc = bnxt_qplib_alloc_init_swq(rq);
- if (rc)
- goto fail_rq;
+ if (!rq->hwq.is_user) {
+ rc = bnxt_qplib_alloc_init_swq(rq);
+ if (rc)
+ goto fail_rq;
+ }
req.rq_size = cpu_to_le32(rq->max_wqe);
pbl = &rq->hwq.pbl[PBL_LVL_0];
@@ -1174,9 +1181,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
+ spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
+ spin_unlock_bh(&rcfw->tbl_lock);
return 0;
fail:
@@ -1283,7 +1292,8 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
}
}
-static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
+static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp,
struct cmdq_modify_qp *req)
{
u32 mandatory_flags = 0;
@@ -1298,6 +1308,14 @@ static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
}
+ if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
+ (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
+ mandatory_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
+ }
+
if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
@@ -1338,7 +1356,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
is_optimized_state_transition(qp))
- bnxt_set_mandatory_attributes(qp, &req);
+ bnxt_set_mandatory_attributes(res, qp, &req);
}
bmask = qp->modify_flags;
req.modify_mask = cpu_to_le32(qp->modify_flags);
@@ -1521,6 +1539,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
+ qp->port_id = le16_to_cpu(sb->port_id);
bail:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
sbuf.sb, sbuf.dma_addr);
@@ -2667,10 +2686,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
bnxt_qplib_add_flush_qp(qp);
} else {
/* Before we complete, do WA 9060 */
- if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
- cqe_sq_cons)) {
- *lib_qp = qp;
- goto out;
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
+ if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
}
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
cqe->status = CQ_REQ_STATUS_OK;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ef3424c81345..0660101b5310 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -114,7 +114,6 @@ struct bnxt_qplib_sge {
u32 size;
};
-#define BNXT_QPLIB_QP_MAX_SGL 6
struct bnxt_qplib_swq {
u64 wr_id;
int next_idx;
@@ -154,7 +153,7 @@ struct bnxt_qplib_swqe {
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
- struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
+ struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
int num_sge;
/* Max inline data is 96 bytes */
u32 inline_len;
@@ -299,6 +298,7 @@ struct bnxt_qplib_qp {
u32 dest_qpn;
u8 smac[6];
u16 vlan_id;
+ u16 port_id;
u8 nw_type;
struct bnxt_qplib_ah ah;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 5e90ea232de8..17e62f22683b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -424,7 +424,8 @@ static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
/* Prevent posting if f/w is not in a state to process */
if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
- return bnxt_qplib_map_rc(opcode);
+ return -ENXIO;
+
if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
return -ETIMEDOUT;
@@ -493,7 +494,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
rc = __send_message_basic_sanity(rcfw, msg, opcode);
if (rc)
- return rc;
+ return rc == -ENXIO ? bnxt_qplib_map_rc(opcode) : rc;
rc = __send_message(rcfw, msg, opcode);
if (rc)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 21fb148713a6..cbfc49a1a56d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -584,6 +584,11 @@ static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
}
+static inline bool _is_min_rnr_in_rtr_rts_mandatory(u16 dev_cap_ext_flags2)
+{
+ return !!(dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED);
+}
+
static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
{
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 7e20ae3d2c4f..9df3e3271577 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -129,12 +129,18 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_qp_init_rd_atom =
sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
- attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
- /*
- * 128 WQEs needs to be reserved for the HW (8916). Prevent
- * reporting the max number
- */
- attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
+ /*
+ * 128 WQEs need to be reserved for the HW (8916). Prevent
+ * reporting the max number on legacy devices.
+ */
+ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ }
+
+ /* Adjust for max_qp_wqes for variable wqe */
+ if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index a98fc9c2313e..0ee60fdc18b3 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2215,6 +2215,7 @@ struct creq_query_func_resp_sb {
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+ #define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
__le16 max_xp_qp_size;
__le16 create_qp_batch_size;
__le16 destroy_qp_batch_size;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index f84521be3bea..605562122ecc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -931,6 +931,7 @@ struct hns_roce_hem_item {
size_t count; /* max ba numbers */
int start; /* start buf offset in this hem */
int end; /* end buf offset in this hem */
+ bool exist_bt;
};
/* All HEM items are linked in a tree structure */
@@ -959,6 +960,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}
}
+ hem->exist_bt = exist_bt;
hem->count = count;
hem->start = start;
hem->end = end;
@@ -969,22 +971,22 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}
static void hem_list_free_item(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_item *hem, bool exist_bt)
+ struct hns_roce_hem_item *hem)
{
- if (exist_bt)
+ if (hem->exist_bt)
dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
hem->addr, hem->dma_addr);
kfree(hem);
}
static void hem_list_free_all(struct hns_roce_dev *hr_dev,
- struct list_head *head, bool exist_bt)
+ struct list_head *head)
{
struct hns_roce_hem_item *hem, *temp_hem;
list_for_each_entry_safe(hem, temp_hem, head, list) {
list_del(&hem->list);
- hem_list_free_item(hr_dev, hem, exist_bt);
+ hem_list_free_item(hr_dev, hem);
}
}
@@ -1084,6 +1086,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
for (i = 0; i < region_cnt; i++) {
r = (struct hns_roce_buf_region *)&regions[i];
+ /* when r->hopnum = 0, the region should not occupy root_ba. */
+ if (!r->hopnum)
+ continue;
+
if (r->hopnum > 1) {
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
if (step > 0)
@@ -1177,7 +1183,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
err_exit:
for (level = 1; level < hopnum; level++)
- hem_list_free_all(hr_dev, &temp_list[level], true);
+ hem_list_free_all(hr_dev, &temp_list[level]);
return ret;
}
@@ -1218,16 +1224,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
{
struct hns_roce_hem_item *hem;
+ /* This is on the has_mtt branch: if r->hopnum
+ * is 0, there is no root_ba to reuse for the
+ * region's fake hem, so a dma_alloc request is
+ * necessary here.
+ */
hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
- r->count, false);
+ r->count, !r->hopnum);
if (!hem)
return -ENOMEM;
- hem_list_assign_bt(hem, cpu_base, phy_base);
+ /* The root_ba can be reused only when r->hopnum > 0. */
+ if (r->hopnum)
+ hem_list_assign_bt(hem, cpu_base, phy_base);
list_add(&hem->list, branch_head);
list_add(&hem->sibling, leaf_head);
- return r->count;
+ /* If r->hopnum == 0, 0 is returned,
+ * so that the root_bt entry is not occupied.
+ */
+ return r->hopnum ? r->count : 0;
}
static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
@@ -1271,7 +1287,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
return -ENOMEM;
total = 0;
- for (i = 0; i < region_cnt && total < max_ba_num; i++) {
+ for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@@ -1337,9 +1353,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
region_cnt);
if (ret) {
for (i = 0; i < region_cnt; i++)
- hem_list_free_all(hr_dev, &head.branch[i], false);
+ hem_list_free_all(hr_dev, &head.branch[i]);
- hem_list_free_all(hr_dev, &head.root, true);
+ hem_list_free_all(hr_dev, &head.root);
}
return ret;
@@ -1402,10 +1418,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
- hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
- j != 0);
+ hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);
- hem_list_free_all(hr_dev, &hem_list->root_bt, true);
+ hem_list_free_all(hr_dev, &hem_list->root_bt);
INIT_LIST_HEAD(&hem_list->btm_bt);
hem_list->root_ba = 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 697b17cca02e..0144e7210d05 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -468,7 +468,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
valid_num_sge = calc_wr_sge_num(wr, &msg_len);
ret = set_ud_opcode(ud_sq_wqe, wr);
- if (WARN_ON(ret))
+ if (WARN_ON_ONCE(ret))
return ret;
ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
@@ -572,7 +572,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
- if (WARN_ON(ret))
+ if (WARN_ON_ONCE(ret))
return ret;
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
@@ -670,6 +670,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
#define HNS_ROCE_SL_SHIFT 2
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+ if (unlikely(qp->state == IB_QPS_ERR)) {
+ flush_cqe(hr_dev, qp);
+ return;
+ }
/* All kinds of DirectWQE have the same header field layout */
hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
@@ -5619,6 +5623,9 @@ static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev,
{
struct hns_roce_dip *hr_dip = hr_qp->dip;
+ if (!hr_dip)
+ return;
+
xa_lock(&hr_dev->qp_table.dip_xa);
hr_dip->qp_cnt--;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index bf30b3a65a9b..55b9283bfc6f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -814,11 +814,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
mapped_cnt < page_cnt; i++) {
r = &mtr->hem_cfg.region[i];
- /* if hopnum is 0, no need to map pages in this region */
- if (!r->hopnum) {
- mapped_cnt += r->count;
- continue;
- }
if (r->offset + r->count > page_cnt) {
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index bc7930d0c564..f5b59d02f4d3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2839,7 +2839,7 @@ static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
int err;
*num_plane = 0;
- if (!MLX5_CAP_GEN(mdev, ib_virt))
+ if (!MLX5_CAP_GEN(mdev, ib_virt) || !MLX5_CAP_GEN_2(mdev, multiplane))
return 0;
err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
@@ -3639,7 +3639,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
list) {
if (dev->sys_image_guid == mpi->sys_image_guid &&
- (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
+ (mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
+ mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
bound = mlx5_ib_bind_slave_port(dev, mpi);
}
@@ -4785,7 +4786,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
mutex_lock(&mlx5_ib_multiport_mutex);
list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
- if (dev->sys_image_guid == mpi->sys_image_guid)
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+ mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
bound = mlx5_ib_bind_slave_port(dev, mpi);
if (bound) {
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 255677bc12b2..1ba4a0c8726a 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -40,6 +40,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
/* initialize rxe device parameters */
static void rxe_init_device_param(struct rxe_dev *rxe)
{
+ struct net_device *ndev;
+
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
rxe->attr.vendor_id = RXE_VENDOR_ID;
@@ -71,8 +73,15 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return;
+
addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
- rxe->ndev->dev_addr);
+ ndev->dev_addr);
+
+ dev_put(ndev);
rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
@@ -109,10 +118,15 @@ static void rxe_init_port_param(struct rxe_port *port)
static void rxe_init_ports(struct rxe_dev *rxe)
{
struct rxe_port *port = &rxe->port;
+ struct net_device *ndev;
rxe_init_port_param(port);
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return;
addrconf_addr_eui48((unsigned char *)&port->port_guid,
- rxe->ndev->dev_addr);
+ ndev->dev_addr);
+ dev_put(ndev);
spin_lock_init(&port->port_lock);
}
@@ -167,12 +181,13 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
/* called by ifc layer to create new rxe device.
* The caller should allocate memory for rxe by calling ib_alloc_device.
*/
-int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
+ struct net_device *ndev)
{
rxe_init(rxe);
rxe_set_mtu(rxe, mtu);
- return rxe_register_device(rxe, ibdev_name);
+ return rxe_register_device(rxe, ibdev_name, ndev);
}
static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index d8fb2c7af30a..fe7f97066732 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -139,7 +139,8 @@ enum resp_states {
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
-int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
+int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
+ struct net_device *ndev);
void rxe_rcv(struct sk_buff *skb);
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index 86cc2e18a7fd..07ff47bae31d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -31,10 +31,19 @@
static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
+ struct net_device *ndev;
+ int ret;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return -ENODEV;
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- return dev_mc_add(rxe->ndev, ll_addr);
+ ret = dev_mc_add(ndev, ll_addr);
+ dev_put(ndev);
+
+ return ret;
}
/**
@@ -47,10 +56,19 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
+ struct net_device *ndev;
+ int ret;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return -ENODEV;
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- return dev_mc_del(rxe->ndev, ll_addr);
+ ret = dev_mc_del(ndev, ll_addr);
+ dev_put(ndev);
+
+ return ret;
}
/**
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 75d1407db52d..8cc64ceeb356 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -524,7 +524,16 @@ out:
*/
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
- return rxe->ndev->name;
+ struct net_device *ndev;
+ char *ndev_name;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return NULL;
+ ndev_name = ndev->name;
+ dev_put(ndev);
+
+ return ndev_name;
}
int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
@@ -536,10 +545,9 @@ int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
if (!rxe)
return -ENOMEM;
- rxe->ndev = ndev;
ib_mark_name_assigned_by_user(&rxe->ib_dev);
- err = rxe_add(rxe, ndev->mtu, ibdev_name);
+ err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev);
if (err) {
ib_dealloc_device(&rxe->ib_dev);
return err;
@@ -587,10 +595,18 @@ void rxe_port_down(struct rxe_dev *rxe)
void rxe_set_port_state(struct rxe_dev *rxe)
{
- if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
+ struct net_device *ndev;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return;
+
+ if (netif_running(ndev) && netif_carrier_ok(ndev))
rxe_port_up(rxe);
else
rxe_port_down(rxe);
+
+ dev_put(ndev);
}
static int rxe_notify(struct notifier_block *not_blk,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 5c18f7e342f2..8a5fc20fd186 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -41,6 +41,7 @@ static int rxe_query_port(struct ib_device *ibdev,
u32 port_num, struct ib_port_attr *attr)
{
struct rxe_dev *rxe = to_rdev(ibdev);
+ struct net_device *ndev;
int err, ret;
if (port_num != 1) {
@@ -49,6 +50,12 @@ static int rxe_query_port(struct ib_device *ibdev,
goto err_out;
}
+ ndev = rxe_ib_device_get_netdev(ibdev);
+ if (!ndev) {
+ err = -ENODEV;
+ goto err_out;
+ }
+
memcpy(attr, &rxe->port.attr, sizeof(*attr));
mutex_lock(&rxe->usdev_lock);
@@ -57,13 +64,14 @@ static int rxe_query_port(struct ib_device *ibdev,
if (attr->state == IB_PORT_ACTIVE)
attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
- else if (dev_get_flags(rxe->ndev) & IFF_UP)
+ else if (dev_get_flags(ndev) & IFF_UP)
attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
else
attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
mutex_unlock(&rxe->usdev_lock);
+ dev_put(ndev);
return ret;
err_out:
@@ -1425,9 +1433,16 @@ static const struct attribute_group rxe_attr_group = {
static int rxe_enable_driver(struct ib_device *ib_dev)
{
struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
+ struct net_device *ndev;
+
+ ndev = rxe_ib_device_get_netdev(ib_dev);
+ if (!ndev)
+ return -ENODEV;
rxe_set_port_state(rxe);
- dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
+ dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));
+
+ dev_put(ndev);
return 0;
}
@@ -1495,7 +1510,8 @@ static const struct ib_device_ops rxe_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};
-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
+ struct net_device *ndev)
{
int err;
struct ib_device *dev = &rxe->ib_dev;
@@ -1507,13 +1523,13 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
dev->num_comp_vectors = num_possible_cpus();
dev->local_dma_lkey = 0;
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
- rxe->ndev->dev_addr);
+ ndev->dev_addr);
dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
ib_set_device_ops(dev, &rxe_dev_ops);
- err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
+ err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
if (err)
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 3c1354f82283..6573ceec0ef5 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -370,6 +370,7 @@ struct rxe_port {
u32 qp_gsi_index;
};
+#define RXE_PORT 1
struct rxe_dev {
struct ib_device ib_dev;
struct ib_device_attr attr;
@@ -377,8 +378,6 @@ struct rxe_dev {
int max_inline_data;
struct mutex usdev_lock;
- struct net_device *ndev;
-
struct rxe_pool uc_pool;
struct rxe_pool pd_pool;
struct rxe_pool ah_pool;
@@ -406,6 +405,11 @@ struct rxe_dev {
struct crypto_shash *tfm;
};
+static inline struct net_device *rxe_ib_device_get_netdev(struct ib_device *dev)
+{
+ return ib_device_get_netdev(dev, RXE_PORT);
+}
+
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
atomic64_inc(&rxe->stats_counters[index]);
@@ -471,6 +475,7 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
return to_rpd(mw->ibmw.pd);
}
-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
+ struct net_device *ndev);
#endif /* RXE_VERBS_H */
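The rxe rework replaces the cached rxe->ndev pointer with per-use lookups so the netdev's lifetime is covered by a reference: every successful ib_device_get_netdev() must be balanced by dev_put(). The general shape, with a hypothetical use_addr() consumer:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>

static int with_port_netdev(struct ib_device *ibdev, u32 port,
			    void (*use_addr)(const u8 *mac))
{
	struct net_device *ndev = ib_device_get_netdev(ibdev, port);

	if (!ndev)
		return -ENODEV;	/* no netdev bound, e.g. during unregister */

	use_addr(ndev->dev_addr);	/* safe: we hold a reference */
	dev_put(ndev);			/* drop the reference taken above */

	return 0;
}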
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 86d4d6a2170e..ea5eee50dc39 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -46,6 +46,9 @@
*/
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
+/* There is always exactly one port (port 1) per siw device */
+#define SIW_PORT 1
+
struct siw_dev_cap {
int max_qp;
int max_qp_wr;
@@ -69,16 +72,12 @@ struct siw_pd {
struct siw_device {
struct ib_device base_dev;
- struct net_device *netdev;
struct siw_dev_cap attrs;
u32 vendor_part_id;
int numa_node;
char raw_gid[ETH_ALEN];
- /* physical port state (only one port per device) */
- enum ib_port_state state;
-
spinlock_t lock;
struct xarray qp_xa;
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 86323918a570..708b13993fdf 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1759,6 +1759,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
{
struct socket *s;
struct siw_cep *cep = NULL;
+ struct net_device *ndev = NULL;
struct siw_device *sdev = to_siw_dev(id->device);
int addr_family = id->local_addr.ss_family;
int rv = 0;
@@ -1779,9 +1780,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
/* For wildcard addr, limit binding to current device only */
- if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
- s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
-
+ if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) {
+ ndev = ib_device_get_netdev(id->device, SIW_PORT);
+ if (ndev) {
+ s->sk->sk_bound_dev_if = ndev->ifindex;
+ } else {
+ rv = -ENODEV;
+ goto error;
+ }
+ }
rv = s->ops->bind(s, (struct sockaddr *)laddr,
sizeof(struct sockaddr_in));
} else {
@@ -1797,9 +1804,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
}
/* For wildcard addr, limit binding to current device only */
- if (ipv6_addr_any(&laddr->sin6_addr))
- s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
-
+ if (ipv6_addr_any(&laddr->sin6_addr)) {
+ ndev = ib_device_get_netdev(id->device, SIW_PORT);
+ if (ndev) {
+ s->sk->sk_bound_dev_if = ndev->ifindex;
+ } else {
+ rv = -ENODEV;
+ goto error;
+ }
+ }
rv = s->ops->bind(s, (struct sockaddr *)laddr,
sizeof(struct sockaddr_in6));
}
@@ -1860,6 +1873,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
}
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;
+ dev_put(ndev);
siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
@@ -1879,6 +1893,7 @@ error:
siw_cep_set_free_and_put(cep);
}
sock_release(s);
+ dev_put(ndev);
return rv;
}
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 17abef48abcd..14d3103aee6f 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -287,7 +287,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
return NULL;
base_dev = &sdev->base_dev;
- sdev->netdev = netdev;
if (netdev->addr_len) {
memcpy(sdev->raw_gid, netdev->dev_addr,
@@ -381,12 +380,10 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
switch (event) {
case NETDEV_UP:
- sdev->state = IB_PORT_ACTIVE;
siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
break;
case NETDEV_DOWN:
- sdev->state = IB_PORT_DOWN;
siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
break;
@@ -407,12 +404,8 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE);
break;
/*
- * Todo: Below netdev events are currently not handled.
+ * All other events are not handled
*/
- case NETDEV_CHANGEMTU:
- case NETDEV_CHANGE:
- break;
-
default:
break;
}
@@ -442,12 +435,6 @@ static int siw_newlink(const char *basedev_name, struct net_device *netdev)
sdev = siw_device_create(netdev);
if (sdev) {
dev_dbg(&netdev->dev, "siw: new device\n");
-
- if (netif_running(netdev) && netif_carrier_ok(netdev))
- sdev->state = IB_PORT_ACTIVE;
- else
- sdev->state = IB_PORT_DOWN;
-
ib_mark_name_assigned_by_user(&sdev->base_dev);
rv = siw_device_register(sdev, basedev_name);
if (rv)
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 986666c19378..7ca0297d68a4 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -171,21 +171,29 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
int siw_query_port(struct ib_device *base_dev, u32 port,
struct ib_port_attr *attr)
{
- struct siw_device *sdev = to_siw_dev(base_dev);
+ struct net_device *ndev;
int rv;
memset(attr, 0, sizeof(*attr));
rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
&attr->active_width);
+ if (rv)
+ return rv;
+
+ ndev = ib_device_get_netdev(base_dev, SIW_PORT);
+ if (!ndev)
+ return -ENODEV;
+
attr->gid_tbl_len = 1;
attr->max_msg_sz = -1;
- attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
- attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
- attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
+ attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
+ attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
+ attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
+ attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
- attr->state = sdev->state;
/*
* All zero
*
@@ -199,6 +207,7 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
* attr->subnet_timeout = 0;
* attr->init_type_repy = 0;
*/
+ dev_put(ndev);
return rv;
}
@@ -505,21 +514,24 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
struct siw_qp *qp;
- struct siw_device *sdev;
+ struct net_device *ndev;
- if (base_qp && qp_attr && qp_init_attr) {
+ if (base_qp && qp_attr && qp_init_attr)
qp = to_siw_qp(base_qp);
- sdev = to_siw_dev(base_qp->device);
- } else {
+ else
return -EINVAL;
- }
+
+ ndev = ib_device_get_netdev(base_qp->device, SIW_PORT);
+ if (!ndev)
+ return -ENODEV;
+
qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
qp_attr->cap.max_send_wr = qp->attrs.sq_size;
qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
- qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
+ qp_attr->path_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
qp_attr->max_rd_atomic = qp->attrs.irq_size;
qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
@@ -534,6 +546,7 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
+ dev_put(ndev);
return 0;
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index e83d95647852..ef4abdea3c2d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -349,6 +349,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct rtrs_srv_mr *srv_mr;
bool need_inval = false;
enum ib_send_flags flags;
+ struct ib_sge list;
u32 imm;
int err;
@@ -401,7 +402,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
imm_wr.wr.next = NULL;
if (always_invalidate) {
- struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;
srv_mr = &srv_path->mrs[id->msg_id];
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index fb38f684444f..d00e713c1092 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -120,6 +120,7 @@ config PMAC_MEDIABAY
config PMAC_BACKLIGHT
bool "Backlight control for LCD screens"
depends on PPC_PMAC && ADB_PMU && FB = y && (BROKEN || !PPC64)
+ depends on BACKLIGHT_CLASS_DEVICE=y
select FB_BACKLIGHT
help
Say Y here to enable Macintosh specific extensions of the generic
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index 822639f11c04..63bc7b74bc8b 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -51,7 +51,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,2=xfer,4=setfe,8=getfe (|-a
static int dib3000_read_reg(struct dib3000_state *state, u16 reg)
{
u8 wb[] = { ((reg >> 8) | 0x80) & 0xff, reg & 0xff };
- u8 rb[2];
+ u8 rb[2] = {};
struct i2c_msg msg[] = {
{ .addr = state->config.demod_address, .flags = 0, .buf = wb, .len = 2 },
{ .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = rb, .len = 2 },
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
index eea709d93820..47c302745c1d 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
@@ -1188,7 +1188,8 @@ err:
return ret;
}
-static
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
void vdec_vp9_slice_map_counts_eob_coef(unsigned int i, unsigned int j, unsigned int k,
struct vdec_vp9_slice_frame_counts *counts,
struct v4l2_vp9_frame_symbol_counts *counts_helper)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index efb0d2d5716b..af445d3f8e2a 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -3070,6 +3070,7 @@ release_clk:
msdc_gate_clock(host);
platform_set_drvdata(pdev, NULL);
release_mem:
+ device_init_wakeup(&pdev->dev, false);
if (host->dma.gpd)
dma_free_coherent(&pdev->dev,
2 * sizeof(struct mt_gpdma_desc),
@@ -3103,6 +3104,7 @@ static void msdc_drv_remove(struct platform_device *pdev)
host->dma.gpd, host->dma.gpd_addr);
dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
host->dma.bd, host->dma.bd_addr);
+ device_init_wakeup(&pdev->dev, false);
}
static void msdc_save_reg(struct msdc_host *host)
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e00208535bd1..319f0ebbe652 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1867,20 +1867,20 @@ static int sdhci_msm_program_key(struct cqhci_host *cq_host,
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
union cqhci_crypto_cap_entry cap;
+ if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
+ return qcom_ice_evict_key(msm_host->ice, slot);
+
/* Only AES-256-XTS has been tested so far. */
cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
return -EINVAL;
- if (cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)
- return qcom_ice_program_key(msm_host->ice,
- QCOM_ICE_CRYPTO_ALG_AES_XTS,
- QCOM_ICE_CRYPTO_KEY_SIZE_256,
- cfg->crypto_key,
- cfg->data_unit_size, slot);
- else
- return qcom_ice_evict_key(msm_host->ice, slot);
+ return qcom_ice_program_key(msm_host->ice,
+ QCOM_ICE_CRYPTO_ALG_AES_XTS,
+ QCOM_ICE_CRYPTO_KEY_SIZE_256,
+ cfg->crypto_key,
+ cfg->data_unit_size, slot);
}
#else /* CONFIG_MMC_CRYPTO */
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 4d402b601883..b2f5c3f8b839 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -1525,7 +1525,6 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
- SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER,
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index db42aa0c7b6b..865754737f5f 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -1409,8 +1409,8 @@ static int anfc_parse_cs(struct arasan_nfc *nfc)
* case, the "not" chosen CS is assigned to nfc->spare_cs and selected
* whenever a GPIO CS must be asserted.
*/
- if (nfc->cs_array && nfc->ncs > 2) {
- if (!nfc->cs_array[0] && !nfc->cs_array[1]) {
+ if (nfc->cs_array) {
+ if (nfc->ncs > 2 && !nfc->cs_array[0] && !nfc->cs_array[1]) {
dev_err(nfc->dev,
"Assign a single native CS when using GPIOs\n");
return -EINVAL;
@@ -1478,8 +1478,15 @@ static int anfc_probe(struct platform_device *pdev)
static void anfc_remove(struct platform_device *pdev)
{
+ int i;
struct arasan_nfc *nfc = platform_get_drvdata(pdev);
+ for (i = 0; i < nfc->ncs; i++) {
+ if (nfc->cs_array[i]) {
+ gpiod_put(nfc->cs_array[i]);
+ }
+ }
+
anfc_chips_cleanup(nfc);
}
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
index a22aab4ed4e8..3c7dee1be21d 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.c
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -380,10 +380,8 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
user->delta = user->dmu + req->ecc.strength + 1;
gf_tables = atmel_pmecc_get_gf_tables(req);
- if (IS_ERR(gf_tables)) {
- kfree(user);
+ if (IS_ERR(gf_tables))
return ERR_CAST(gf_tables);
- }
user->gf_tables = gf_tables;
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 8db7fc424571..70d6c2250f32 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1098,7 +1098,7 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti
(i == 0) && (ip->firstUnit > 0)) {
parts[0].name = " DiskOnChip IPL / Media Header partition";
parts[0].offset = 0;
- parts[0].size = mtd->erasesize * ip->firstUnit;
+ parts[0].size = (uint64_t)mtd->erasesize * ip->firstUnit;
numparts = 1;
}
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index d9141f3c0dd1..b8af3a3533fc 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -254,6 +254,10 @@ static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
/**
* omap_nand_data_in_pref - NAND data in using prefetch engine
+ * @chip: NAND chip
+ * @buf: output buffer where NAND data is placed into
+ * @len: length of transfer
+ * @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
@@ -297,6 +301,10 @@ static void omap_nand_data_in_pref(struct nand_chip *chip, void *buf,
/**
* omap_nand_data_out_pref - NAND data out using Write Posting engine
+ * @chip: NAND chip
+ * @buf: input buffer that is sent to NAND
+ * @len: length of transfer
+ * @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_out_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
@@ -440,6 +448,10 @@ out_copy:
/**
* omap_nand_data_in_dma_pref - NAND data in using DMA and Prefetch
+ * @chip: NAND chip
+ * @buf: output buffer where NAND data is placed into
+ * @len: length of transfer
+ * @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
unsigned int len, bool force_8bit)
@@ -460,6 +472,10 @@ static void omap_nand_data_in_dma_pref(struct nand_chip *chip, void *buf,
/**
* omap_nand_data_out_dma_pref - NAND data out using DMA and write posting
+ * @chip: NAND chip
+ * @buf: input buffer that is sent to NAND
+ * @len: length of transfer
+ * @force_8bit: force 8-bit transfers
*/
static void omap_nand_data_out_dma_pref(struct nand_chip *chip,
const void *buf, unsigned int len,
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 533bcb77c9f9..97cd8bbf2e32 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1220,20 +1220,32 @@ static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
static int m_can_interrupt_handler(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
- u32 ir;
+ u32 ir = 0, ir_read;
int ret;
if (pm_runtime_suspended(cdev->dev))
return IRQ_NONE;
- ir = m_can_read(cdev, M_CAN_IR);
+ /* The m_can controller signals its interrupt status as a level, but
+ * depending on the integration the CPU may interpret the signal as
+ * edge-triggered (for example with m_can_pci). For these
+ * edge-triggered integrations, we must observe that IR is 0 at least
+ * once to be sure that the next interrupt will generate an edge.
+ */
+ while ((ir_read = m_can_read(cdev, M_CAN_IR)) != 0) {
+ ir |= ir_read;
+
+ /* ACK all irqs */
+ m_can_write(cdev, M_CAN_IR, ir);
+
+ if (!cdev->irq_edge_triggered)
+ break;
+ }
+
m_can_coalescing_update(cdev, ir);
if (!ir)
return IRQ_NONE;
- /* ACK all irqs */
- m_can_write(cdev, M_CAN_IR, ir);
-
if (cdev->ops->clear_interrupts)
cdev->ops->clear_interrupts(cdev);
@@ -1695,6 +1707,14 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
return -EINVAL;
}
+ /* Write the INIT bit, in case no hardware reset has happened before
+ * the probe (for example, it was observed that the Intel Elkhart Lake
+ * SoCs do not properly reset the CAN controllers on reboot)
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ if (err)
+ return err;
+
if (!cdev->is_peripheral)
netif_napi_add(dev, &cdev->napi, m_can_poll);
@@ -1746,11 +1766,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
return -EINVAL;
}
- /* Forcing standby mode should be redundant, as the chip should be in
- * standby after a reset. Write the INIT bit anyways, should the chip
- * be configured by previous stage.
- */
- return m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ return 0;
}
static void m_can_stop(struct net_device *dev)
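
A minimal sketch of the IR drain pattern described in the comment above, assuming nothing beyond standard C: read_ir(), ack_ir() and the fake_ir register are hypothetical stand-ins for the m_can register accessors, used only to show why edge-triggered integrations must loop until IR reads back as 0.

/* Simulated IR drain loop (not driver code). */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t fake_ir = 0x05;			/* pending interrupt bits */

static uint32_t read_ir(void)  { return fake_ir; }
static void ack_ir(uint32_t v) { fake_ir &= ~v; }	/* write-1-to-clear */

static uint32_t drain_ir(bool edge_triggered)
{
	uint32_t ir = 0, ir_read;

	/* Accumulate and ack until IR is observed as 0 so the next
	 * event produces a fresh edge; level setups read only once. */
	while ((ir_read = read_ir()) != 0) {
		ir |= ir_read;
		ack_ir(ir);
		if (!edge_triggered)
			break;
	}
	return ir;
}

int main(void)
{
	printf("handled IR bits: 0x%02x\n", drain_ir(true));
	return 0;
}
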
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 92b2bd8628e6..ef39e8e527ab 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -99,6 +99,7 @@ struct m_can_classdev {
int pm_clock_support;
int pm_wake_source;
int is_peripheral;
+ bool irq_edge_triggered;
// Cached M_CAN_IE register content
u32 active_interrupts;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index d72fe771dfc7..9ad7419f88f8 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -127,6 +127,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->pm_clock_support = 1;
mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = id->driver_data;
+ mcan_class->irq_edge_triggered = true;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index d16817e0476f..29fe79ea74cd 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 switch driver main logic
*
- * Copyright (C) 2017-2019 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
@@ -983,26 +983,51 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
u32 secs = msecs / 1000;
- u8 value;
- u8 data;
+ u8 data, mult, value;
+ u32 max_val;
int ret;
- value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+#define MAX_TIMER_VAL ((1 << 8) - 1)
- ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
- if (ret < 0)
- return ret;
+ /* The aging timer comprises a 3-bit multiplier and an 8-bit second
+ * value. Neither of them can be zero. The maximum timer is then
+ * 7 * 255 = 1785 seconds.
+ */
+ if (!secs)
+ secs = 1;
- data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+ /* Return error if too large. */
+ else if (secs > 7 * MAX_TIMER_VAL)
+ return -EINVAL;
ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
if (ret < 0)
return ret;
- value &= ~SW_AGE_CNT_M;
- value |= FIELD_PREP(SW_AGE_CNT_M, data);
+ /* Check whether there is need to update the multiplier. */
+ mult = FIELD_GET(SW_AGE_CNT_M, value);
+ max_val = MAX_TIMER_VAL;
+ if (mult > 0) {
+ /* Try to reuse the multiplier already in the register, as the
+ * hardware default uses a multiplier of 4 and 75 seconds for
+ * 300 seconds.
+ */
+ max_val = DIV_ROUND_UP(secs, mult);
+ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+ max_val = MAX_TIMER_VAL;
+ }
+
+ data = DIV_ROUND_UP(secs, max_val);
+ if (mult != data) {
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+ if (ret < 0)
+ return ret;
+ }
- return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+ value = DIV_ROUND_UP(secs, data);
+ return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
}
void ksz9477_port_queue_split(struct ksz_device *dev, int port)
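
A minimal sketch, under the same assumptions the comment above spells out, of how an ageing time in seconds might be split into the 3-bit multiplier and 8-bit seconds fields; split_ageing() and cur_mult are illustrative names, not driver API.

/* Stand-alone arithmetic sketch (not driver code). */
#include <stdio.h>

#define MAX_TIMER_VAL ((1 << 8) - 1)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int split_ageing(unsigned int secs, unsigned int cur_mult,
			unsigned int *mult, unsigned int *val)
{
	unsigned int max_val = MAX_TIMER_VAL;

	if (!secs)
		secs = 1;
	else if (secs > 7 * MAX_TIMER_VAL)
		return -1;

	/* Prefer the multiplier already programmed if it divides evenly. */
	if (cur_mult > 0) {
		max_val = DIV_ROUND_UP(secs, cur_mult);
		if (max_val > MAX_TIMER_VAL || max_val * cur_mult != secs)
			max_val = MAX_TIMER_VAL;
	}

	*mult = DIV_ROUND_UP(secs, max_val);
	*val = DIV_ROUND_UP(secs, *mult);
	return 0;
}

int main(void)
{
	unsigned int mult, val;

	if (!split_ageing(300, 4, &mult, &val))
		printf("300 s -> mult %u x %u s\n", mult, val);	/* 4 x 75 */
	return 0;
}
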
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 04235c22bf40..ff579920078e 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -2,7 +2,7 @@
/*
* Microchip KSZ9477 register definitions
*
- * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Copyright (C) 2017-2024 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
@@ -165,8 +165,6 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
#define SW_AGE_CNT_M GENMASK(5, 3)
-#define SW_AGE_CNT_S 3
-#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index b7652efd632e..b1ae3b9de3d1 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Microchip LAN937X switch driver main logic
- * Copyright (C) 2019-2022 Microchip Technology Inc.
+ * Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -461,10 +461,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
{
- u32 secs = msecs / 1000;
- u32 value;
+ u8 data, mult, value8;
+ bool in_msec = false;
+ u32 max_val, value;
+ u32 secs = msecs;
int ret;
+#define MAX_TIMER_VAL ((1 << 20) - 1)
+
+ /* The aging timer comprises a 3-bit multiplier and a 20-bit second
+ * value. Neither of them can be zero. The maximum timer is then
+ * 7 * 1048575 = 7340025 seconds. As this value is too large for
+ * practical use, it can be interpreted as microseconds, making the
+ * maximum timer 7340 seconds with finer control. This allows for a
+ * maximum of 122 minutes, compared to 29 minutes on the KSZ9477 switch.
+ */
+ if (msecs % 1000)
+ in_msec = true;
+ else
+ secs /= 1000;
+ if (!secs)
+ secs = 1;
+
+ /* Return error if too large. */
+ else if (secs > 7 * MAX_TIMER_VAL)
+ return -EINVAL;
+
+ /* Configure how to interpret the timer value. */
+ ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
+ in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
+ if (ret < 0)
+ return ret;
+
+ /* Check whether there is need to update the multiplier. */
+ mult = FIELD_GET(SW_AGE_CNT_M, value8);
+ max_val = MAX_TIMER_VAL;
+ if (mult > 0) {
+ /* Try to reuse the multiplier already in the register, as the
+ * hardware default uses a multiplier of 4 and 75 seconds for
+ * 300 seconds.
+ */
+ max_val = DIV_ROUND_UP(secs, mult);
+ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
+ max_val = MAX_TIMER_VAL;
+ }
+
+ data = DIV_ROUND_UP(secs, max_val);
+ if (mult != data) {
+ value8 &= ~SW_AGE_CNT_M;
+ value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
+ if (ret < 0)
+ return ret;
+ }
+
+ secs = DIV_ROUND_UP(secs, data);
+
value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index 4ec93e421da4..72042fd64e5b 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Microchip LAN937X switch register definitions
- * Copyright (C) 2019-2021 Microchip Technology Inc.
+ * Copyright (C) 2019-2024 Microchip Technology Inc.
*/
#ifndef __LAN937X_REG_H
#define __LAN937X_REG_H
@@ -56,8 +56,7 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
-#define SW_AGE_CNT_M 0x7
-#define SW_AGE_CNT_S 3
+#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define REG_SW_LUE_CTRL_1 0x0311
@@ -70,6 +69,10 @@
#define SW_FAST_AGING BIT(1)
#define SW_LINK_AUTO_AGING BIT(0)
+#define REG_SW_LUE_CTRL_2 0x0312
+
+#define SW_AGE_CNT_IN_MICROSEC BIT(7)
+
#define REG_SW_AGE_PERIOD__1 0x0313
#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 42672c63f108..bc4e1f3b3752 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1933,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
/* Reset UniMAC */
umac_reset(priv);
@@ -2591,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable priv clock\n");
+ goto err_deregister_netdev;
+ }
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
@@ -2605,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
+err_deregister_netdev:
+ unregister_netdev(dev);
err_deregister_notifier:
unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
@@ -2774,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
+
if (priv->wolopts)
clk_disable_unprepare(priv->wol_clk);
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index ecce23cecbea..4e266ce41180 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -171,6 +171,7 @@ static int platform_phy_connect(struct bgmac *bgmac)
static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct device_node *phy_node;
struct bgmac *bgmac;
struct resource *regs;
int ret;
@@ -236,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
- if (of_parse_phandle(np, "phy-handle", 0)) {
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ of_node_put(phy_node);
bgmac->phy_connect = platform_phy_connect;
} else {
bgmac->phy_connect = bgmac_phy_connect_direct;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 96fd31d75dfd..daa1ebaef511 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -346,8 +346,9 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
* driver. Once driver synthesizes cpl_pass_accept_req the skb will go
* through the regular cpl_pass_accept_req processing in TOM.
*/
- skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
- - pktshift, GFP_ATOMIC);
+ skb = alloc_skb(size_add(gl->tot_len,
+ sizeof(struct cpl_pass_accept_req)) -
+ pktshift, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index dd92949bb214..8167cc5fb0df 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1140,6 +1140,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
+int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index e171ca248f9a..8a8f6ab12a98 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -333,6 +333,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
if (block->rx) {
work_done = gve_rx_poll(block, budget);
+
+ /* Poll XSK TX as part of RX NAPI. Set up re-poll based on the max
+ * of TX and RX work done.
+ */
+ if (priv->xdp_prog)
+ work_done = max_t(int, work_done,
+ gve_xsk_tx_poll(block, budget));
+
reschedule |= work_done == budget;
}
@@ -922,11 +930,13 @@ static void gve_init_sync_stats(struct gve_priv *priv)
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
+ int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
+
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->ring_size = priv->tx_desc_cnt;
cfg->start_idx = 0;
- cfg->num_rings = gve_num_tx_queues(priv);
+ cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
cfg->tx = priv->tx;
}
@@ -1623,8 +1633,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
if (err)
return err;
- /* If XDP prog is not installed, return */
- if (!priv->xdp_prog)
+ /* If XDP prog is not installed or interface is down, return. */
+ if (!priv->xdp_prog || !netif_running(dev))
return 0;
rx = &priv->rx[qid];
@@ -1669,21 +1679,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
if (qid >= priv->rx_cfg.num_queues)
return -EINVAL;
- /* If XDP prog is not installed, unmap DMA and return */
- if (!priv->xdp_prog)
- goto done;
-
- tx_qid = gve_xdp_tx_queue_id(priv, qid);
- if (!netif_running(dev)) {
- priv->rx[qid].xsk_pool = NULL;
- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
- priv->tx[tx_qid].xsk_pool = NULL;
+ /* If XDP prog is not installed or interface is down, unmap DMA and
+ * return.
+ */
+ if (!priv->xdp_prog || !netif_running(dev))
goto done;
- }
napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
napi_disable(napi_rx); /* make sure current rx poll is done */
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
napi_disable(napi_tx); /* make sure current tx poll is done */
@@ -1709,24 +1714,20 @@ done:
static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
struct gve_priv *priv = netdev_priv(dev);
- int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
+ struct napi_struct *napi;
+
+ if (!gve_get_napi_enabled(priv))
+ return -ENETDOWN;
if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
return -EINVAL;
- if (flags & XDP_WAKEUP_TX) {
- struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
- struct napi_struct *napi =
- &priv->ntfy_blocks[tx->ntfy_id].napi;
-
- if (!napi_if_scheduled_mark_missed(napi)) {
- /* Call local_bh_enable to trigger SoftIRQ processing */
- local_bh_disable();
- napi_schedule(napi);
- local_bh_enable();
- }
-
- tx->xdp_xsk_wakeup++;
+ napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
+ if (!napi_if_scheduled_mark_missed(napi)) {
+ /* Call local_bh_enable to trigger SoftIRQ processing */
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
}
return 0;
@@ -1837,6 +1838,7 @@ int gve_adjust_queues(struct gve_priv *priv,
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ int num_xdp_queues;
int err;
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
@@ -1847,6 +1849,10 @@ int gve_adjust_queues(struct gve_priv *priv,
rx_alloc_cfg.qcfg = &new_rx_config;
tx_alloc_cfg.num_rings = new_tx_config.num_queues;
+ /* Add dedicated XDP TX queues if enabled. */
+ num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
+ tx_alloc_cfg.num_rings += num_xdp_queues;
+
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
@@ -1899,6 +1905,9 @@ static void gve_turndown(struct gve_priv *priv)
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
+
+ /* Make sure that all traffic is finished processing. */
+ synchronize_net();
}
static void gve_turnup(struct gve_priv *priv)
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index e7fb7d6d283d..4350ebd9c2bd 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
return;
gve_remove_napi(priv, ntfy_idx);
- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ if (tx->q_num < priv->tx_cfg.num_queues)
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ else
+ gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_remove_from_block(priv, idx);
}
@@ -834,9 +837,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct gve_tx_ring *tx;
int i, err = 0, qid;
- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
return -EINVAL;
+ if (!gve_get_napi_enabled(priv))
+ return -ENETDOWN;
+
qid = gve_xdp_tx_queue_id(priv,
smp_processor_id() % priv->num_xdp_queues);
@@ -975,33 +981,41 @@ out:
return sent;
}
+int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
+{
+ struct gve_rx_ring *rx = rx_block->rx;
+ struct gve_priv *priv = rx->gve;
+ struct gve_tx_ring *tx;
+ int sent = 0;
+
+ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
+ if (tx->xsk_pool) {
+ sent = gve_xsk_tx(priv, tx, budget);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+ if (xsk_uses_need_wakeup(tx->xsk_pool))
+ xsk_set_tx_need_wakeup(tx->xsk_pool);
+ }
+
+ return sent;
+}
+
bool gve_xdp_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
struct gve_tx_ring *tx = block->tx;
u32 nic_done;
- bool repoll;
u32 to_do;
/* Find out how much work there is to be done */
nic_done = gve_tx_load_event_counter(priv, tx);
to_do = min_t(u32, (nic_done - tx->done), budget);
gve_clean_xdp_done(priv, tx, to_do);
- repoll = nic_done != tx->done;
-
- if (tx->xsk_pool) {
- int sent = gve_xsk_tx(priv, tx, budget);
-
- u64_stats_update_begin(&tx->statss);
- tx->xdp_xsk_sent += sent;
- u64_stats_update_end(&tx->statss);
- repoll |= (sent == budget);
- if (xsk_uses_need_wakeup(tx->xsk_pool))
- xsk_set_tx_need_wakeup(tx->xsk_pool);
- }
/* If we still have work we want to repoll */
- return repoll;
+ return nic_done != tx->done;
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 890f213da8d1..ae1f523d6841 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -172,6 +172,7 @@ err_init_txq:
hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
+ nic_dev->txqs = NULL;
return err;
}
@@ -268,6 +269,7 @@ err_init_rxq:
hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
+ nic_dev->rxqs = NULL;
return err;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 6c913a703df6..41e4bd49402a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -101,6 +101,9 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
intr->dyn_ctl_wb_on_itr_m = PF_GLINT_DYN_CTL_WB_ON_ITR_M;
+ intr->dyn_ctl_swint_trig_m = PF_GLINT_DYN_CTL_SWINT_TRIG_M;
+ intr->dyn_ctl_sw_itridx_ena_m =
+ PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_PF_ITR_IDX_SPACING);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 34f4118c7bc0..2fa9c36e33c9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3604,21 +3604,31 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
/**
* idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
* @q_vector: pointer to q_vector
- * @type: itr index
- * @itr: itr value
*/
-static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
- const int type, u16 itr)
+static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
{
- u32 itr_val;
+ u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
+ int type = IDPF_NO_ITR_UPDATE_IDX;
+ u16 itr = 0;
+
+ if (q_vector->wb_on_itr) {
+ /*
+ * Trigger a software interrupt when exiting wb_on_itr, to make
+ * sure we catch any pending writebacks that might have been
+ * missed due to the interrupt state transition.
+ */
+ itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
+ q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
+ type = IDPF_SW_ITR_UPDATE_IDX;
+ itr = IDPF_ITR_20K;
+ }
itr &= IDPF_ITR_MASK;
/* Don't clear PBA because that can cause lost interrupts that
* came in while we were cleaning/polling
*/
- itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
- (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
- (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
+ itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
+ (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
return itr_val;
}
@@ -3716,9 +3726,8 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/* net_dim() updates ITR out-of-band using a work item */
idpf_net_dim(q_vector);
+ intval = idpf_vport_intr_buildreg_itr(q_vector);
q_vector->wb_on_itr = false;
- intval = idpf_vport_intr_buildreg_itr(q_vector,
- IDPF_NO_ITR_UPDATE_IDX, 0);
writel(intval, q_vector->intr_reg.dyn_ctl);
}
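
A hedged sketch of composing a DYN_CTL doorbell value along the lines of the wb_on_itr comment above; every constant below is an illustrative stand-in, not the real PF/VF register layout.

/* Illustrative DYN_CTL composition (not driver code). */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DYN_CTL_INTENA_M	(1u << 0)
#define DYN_CTL_SWINT_TRIG_M	(1u << 2)
#define DYN_CTL_ITRIDX_S	3
#define DYN_CTL_INTRVL_S	5
#define DYN_CTL_SW_ITRIDX_ENA_M	(1u << 24)

#define SW_ITR_UPDATE_IDX	2
#define NO_ITR_UPDATE_IDX	3
#define ITR_20K			0x0032
#define ITR_MASK		0x1FFE

static uint32_t buildreg_itr(bool wb_on_itr)
{
	uint32_t val = DYN_CTL_INTENA_M;
	unsigned int type = NO_ITR_UPDATE_IDX;
	uint16_t itr = 0;

	if (wb_on_itr) {
		/* Fire a SW interrupt so writebacks pending while
		 * interrupts were masked are not lost. */
		val |= DYN_CTL_SWINT_TRIG_M | DYN_CTL_SW_ITRIDX_ENA_M;
		type = SW_ITR_UPDATE_IDX;
		itr = ITR_20K;
	}

	itr &= ITR_MASK;
	val |= (type << DYN_CTL_ITRIDX_S) | (itr << (DYN_CTL_INTRVL_S - 1));
	return val;
}

int main(void)
{
	printf("dyn_ctl = 0x%08x\n", buildreg_itr(true));
	return 0;
}
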
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 9c1fe84108ed..0f71a6f5557b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -354,6 +354,8 @@ struct idpf_vec_regs {
* @dyn_ctl_itridx_m: Mask for ITR index
* @dyn_ctl_intrvl_s: Register bit offset for ITR interval
* @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
+ * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
+ * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
* @rx_itr: RX ITR register
* @tx_itr: TX ITR register
* @icr_ena: Interrupt cause register offset
@@ -367,6 +369,8 @@ struct idpf_intr_reg {
u32 dyn_ctl_itridx_m;
u32 dyn_ctl_intrvl_s;
u32 dyn_ctl_wb_on_itr_m;
+ u32 dyn_ctl_sw_itridx_ena_m;
+ u32 dyn_ctl_swint_trig_m;
void __iomem *rx_itr;
void __iomem *tx_itr;
void __iomem *icr_ena;
@@ -437,7 +441,7 @@ struct idpf_q_vector {
cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_q_vector, 112,
+libeth_cacheline_set_assert(struct idpf_q_vector, 120,
24 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
8 + sizeof(cpumask_var_t));
@@ -471,6 +475,8 @@ struct idpf_tx_queue_stats {
#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
#define IDPF_ITR_TX_DEF IDPF_ITR_20K
#define IDPF_ITR_RX_DEF IDPF_ITR_20K
+/* Index used for 'SW ITR' update in DYN_CTL register */
+#define IDPF_SW_ITR_UPDATE_IDX 2
/* Index used for 'No ITR' update in DYN_CTL register */
#define IDPF_NO_ITR_UPDATE_IDX 3
#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index aad62e270ae4..aba828abcb17 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -101,6 +101,9 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S;
intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
+ intr->dyn_ctl_swint_trig_m = VF_INT_DYN_CTLN_SWINT_TRIG_M;
+ intr->dyn_ctl_sw_itridx_ena_m =
+ VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M;
spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
IDPF_VF_ITR_IDX_SPACING);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a06048719e84..67a6ff07c83d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2704,9 +2704,15 @@ static struct platform_device *port_platdev[3];
static void mv643xx_eth_shared_of_remove(void)
{
+ struct mv643xx_eth_platform_data *pd;
int n;
for (n = 0; n < 3; n++) {
+ if (!port_platdev[n])
+ continue;
+ pd = dev_get_platdata(&port_platdev[n]->dev);
+ if (pd)
+ of_node_put(pd->phy_node);
platform_device_del(port_platdev[n]);
port_platdev[n] = NULL;
}
@@ -2769,8 +2775,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
}
ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
- if (!ppdev)
- return -ENOMEM;
+ if (!ppdev) {
+ ret = -ENOMEM;
+ goto put_err;
+ }
ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
ppdev->dev.of_node = pnp;
@@ -2792,6 +2800,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
port_err:
platform_device_put(ppdev);
+put_err:
+ of_node_put(ppd.phy_node);
return ret;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 232b10740c13..04e08e06f30f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -680,14 +680,17 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
ndev->features |= ndev->hw_features;
eth_hw_addr_random(ndev);
err = rvu_rep_devlink_port_register(rep);
- if (err)
+ if (err) {
+ free_netdev(ndev);
goto exit;
+ }
SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
err = register_netdev(ndev);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"PFVF representor registration failed");
+ rvu_rep_devlink_port_unregister(rep);
free_netdev(ndev);
goto exit;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 3914cd9210d4..988fa28cfb5f 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index cc9bcc420032..6ab02f3fc291 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
struct mlx5_macsec_rule_attrs rule_attrs;
union mlx5_macsec_rule *macsec_rule;
+ if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
+ return 0;
+
rule_attrs.macsec_obj_id = sa->macsec_obj_id;
rule_attrs.sci = sa->sci;
rule_attrs.assoc_num = sa->assoc_num;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dd16d73000c3..0ec17c276bdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -6542,8 +6542,23 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
- unregister_netdev(priv->netdev);
- _mlx5e_suspend(adev, false);
+ /* When unloading the driver, the netdev is still registered if it
+ * came from legacy mode. If it came from switchdev mode, it was
+ * already unregistered before changing to the NIC profile.
+ */
+ if (priv->netdev->reg_state == NETREG_REGISTERED) {
+ unregister_netdev(priv->netdev);
+ _mlx5e_suspend(adev, false);
+ } else {
+ struct mlx5_core_dev *pos;
+ int i;
+
+ if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
+ else
+ _mlx5e_suspend(adev, true);
+ }
/* Avoid cleanup if profile rollback failed. */
if (priv->profile)
priv->profile->cleanup(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 554f9cb5b53f..fdff9fd8a89e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1509,6 +1509,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
priv = netdev_priv(netdev);
+ /* This bit is set when devlink is used to change the eswitch mode from
+ * switchdev to legacy. As we need to keep the uplink netdev ifindex, we
+ * detach the uplink representor profile and attach the NIC profile only.
+ * In this case the netdev is unregistered later, when the NIC auxiliary
+ * driver is unloaded.
+ * We explicitly block devlink eswitch mode changes if any IPsec rules
+ * are offloaded, but we can't block other cases, such as driver unload
+ * and devlink reload. For those cases we have to unregister the netdev
+ * before the profile change, to avoid a resource leak: the offloaded
+ * rules would otherwise never be unoffloaded before the cleanup that is
+ * triggered by detaching the uplink representor profile.
+ */
+ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
+ unregister_netdev(netdev);
+
mlx5e_netdev_attach_nic_profile(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 5a0047bdcb51..ed977ae75fab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -150,11 +150,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
unsigned long i;
int err;
- xa_for_each(&esw->offloads.vport_reps, i, rep) {
- rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev)
+ mlx5_esw_for_each_rep(esw, i, rep) {
+ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
continue;
+ rpriv = rep->rep_data[REP_ETH].priv;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
rhashtable_walk_start(&iter);
while ((flow = rhashtable_walk_next(&iter)) != NULL) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a83d41121db6..8573d36785f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -714,6 +714,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
(last) - 1)
+#define mlx5_esw_for_each_rep(esw, i, rep) \
+ xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+
struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d5b42b3a19fd..06076dd9ec64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -53,9 +53,6 @@
#include "lag/lag.h"
#include "en/tc/post_meter.h"
-#define mlx5_esw_for_each_rep(esw, i, rep) \
- xa_for_each(&((esw)->offloads.vport_reps), i, rep)
-
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
@@ -3780,6 +3777,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
+ if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index 6fa06ba2d346..f57c84e5128b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
int inlen, err, eqn;
void *cqc, *in;
__be64 *pas;
- int vector;
u32 i;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
if (!in)
goto err_cqwq;
- vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
- err = mlx5_comp_eqn_get(mdev, vector, &eqn);
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
if (err) {
kvfree(in);
goto err_cqwq;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 4b5fd71c897d..32d2e61f2b82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
- 0);
+ 0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
index 2118901b25e9..aeb9f333f4c7 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
@@ -64,7 +64,7 @@ static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p)
u32 i, j;
*(data++) = start;
- *(data++) = end - 1;
+ *(data++) = end;
/* FBNIC_RPC_TCAM_ACT */
for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 3d72aa7b1305..ef93df520887 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1432,7 +1432,7 @@ void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
memset(ifh, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(ifh, 1);
- ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports));
+ ocelot_ifh_set_src(ifh, ocelot->num_phys_ports);
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
ocelot_ifh_set_qos_class(ifh, qos_class);
ocelot_ifh_set_tag_type(ifh, tag_type);
diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
index f9c0dcd965c2..db200e4ec284 100644
--- a/drivers/net/ethernet/oa_tc6.c
+++ b/drivers/net/ethernet/oa_tc6.c
@@ -113,6 +113,7 @@ struct oa_tc6 {
struct mii_bus *mdiobus;
struct spi_device *spi;
struct mutex spi_ctrl_lock; /* Protects spi control transfer */
+ spinlock_t tx_skb_lock; /* Protects tx skb handling */
void *spi_ctrl_tx_buf;
void *spi_ctrl_rx_buf;
void *spi_data_tx_buf;
@@ -1004,8 +1005,10 @@ static u16 oa_tc6_prepare_spi_tx_buf_for_tx_skbs(struct oa_tc6 *tc6)
for (used_tx_credits = 0; used_tx_credits < tc6->tx_credits;
used_tx_credits++) {
if (!tc6->ongoing_tx_skb) {
+ spin_lock_bh(&tc6->tx_skb_lock);
tc6->ongoing_tx_skb = tc6->waiting_tx_skb;
tc6->waiting_tx_skb = NULL;
+ spin_unlock_bh(&tc6->tx_skb_lock);
}
if (!tc6->ongoing_tx_skb)
break;
@@ -1111,8 +1114,9 @@ static int oa_tc6_spi_thread_handler(void *data)
/* This kthread will be waken up if there is a tx skb or mac-phy
* interrupt to perform spi transfer with tx chunks.
*/
- wait_event_interruptible(tc6->spi_wq, tc6->waiting_tx_skb ||
- tc6->int_flag ||
+ wait_event_interruptible(tc6->spi_wq, tc6->int_flag ||
+ (tc6->waiting_tx_skb &&
+ tc6->tx_credits) ||
kthread_should_stop());
if (kthread_should_stop())
@@ -1209,7 +1213,9 @@ netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb)
return NETDEV_TX_OK;
}
+ spin_lock_bh(&tc6->tx_skb_lock);
tc6->waiting_tx_skb = skb;
+ spin_unlock_bh(&tc6->tx_skb_lock);
/* Wake spi kthread to perform spi transfer */
wake_up_interruptible(&tc6->spi_wq);
@@ -1239,6 +1245,7 @@ struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
tc6->netdev = netdev;
SET_NETDEV_DEV(netdev, &spi->dev);
mutex_init(&tc6->spi_ctrl_lock);
+ spin_lock_init(&tc6->tx_skb_lock);
/* Set the SPI controller to pump at realtime priority */
tc6->spi->rt = true;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 9e42d599840d..57edcde9e6f8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -277,7 +277,10 @@ void ionic_dev_teardown(struct ionic *ionic)
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
- destroy_workqueue(ionic->wq);
+ if (ionic->wq) {
+ destroy_workqueue(ionic->wq);
+ ionic->wq = NULL;
+ }
mutex_destroy(&idev->cmb_inuse_lock);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index dda22fa4448c..9b7f78b6cdb1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -961,8 +961,8 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
len = min_t(u32, sizeof(xcvr->sprom), ee->len);
do {
- memcpy(data, xcvr->sprom, len);
- memcpy(tbuf, xcvr->sprom, len);
+ memcpy(data, &xcvr->sprom[ee->offset], len);
+ memcpy(tbuf, &xcvr->sprom[ee->offset], len);
/* Let's make sure we got a consistent copy */
if (!memcmp(data, tbuf, len))
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 40496587b2b3..3d3f936779f7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3869,8 +3869,8 @@ int ionic_lif_register(struct ionic_lif *lif)
/* only register LIF0 for now */
err = register_netdev(lif->netdev);
if (err) {
- dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
- ionic_lif_unregister_phc(lif);
+ dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err);
+ ionic_lif_unregister(lif);
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index b45efc272fdb..c7f497c36f66 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3358,6 +3358,7 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
p_ptt, &nvm_info.num_images);
if (rc == -EOPNOTSUPP) {
DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
+ nvm_info.num_images = 0;
goto out;
} else if (rc || !nvm_info.num_images) {
DP_ERR(p_hwfn, "Failed getting number of images\n");
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index dbbbf024e7ab..9ac6e2aad18f 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -547,7 +547,6 @@ static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
desc = &gq->ts_ring[gq->ring_size];
desc->desc.die_dt = DT_LINKFIX;
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
- INIT_LIST_HEAD(&priv->gwca.ts_info_list);
return 0;
}
@@ -1003,9 +1002,10 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
static void rswitch_ts(struct rswitch_private *priv)
{
struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
struct skb_shared_hwtstamps shhwtstamps;
struct rswitch_ts_desc *desc;
+ struct rswitch_device *rdev;
+ struct sk_buff *ts_skb;
struct timespec64 ts;
unsigned int num;
u32 tag, port;
@@ -1015,23 +1015,28 @@ static void rswitch_ts(struct rswitch_private *priv)
dma_rmb();
port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
- tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
-
- list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
- if (!(ts_info->port == port && ts_info->tag == tag))
- continue;
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- ts.tv_sec = __le32_to_cpu(desc->ts_sec);
- ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
- shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
- skb_tstamp_tx(ts_info->skb, &shhwtstamps);
- dev_consume_skb_irq(ts_info->skb);
- list_del(&ts_info->list);
- kfree(ts_info);
- break;
- }
+ if (unlikely(port >= RSWITCH_NUM_PORTS))
+ goto next;
+ rdev = priv->rdev[port];
+ tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
+ if (unlikely(tag >= TS_TAGS_PER_PORT))
+ goto next;
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ smp_mb(); /* order rdev->ts_skb[] read before bitmap update */
+ clear_bit(tag, rdev->ts_skb_used);
+
+ if (unlikely(!ts_skb))
+ goto next;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(ts_skb, &shhwtstamps);
+ dev_consume_skb_irq(ts_skb);
+
+next:
gq->cur = rswitch_next_queue_index(gq, true, 1);
desc = &gq->ts_ring[gq->cur];
}
@@ -1576,8 +1581,9 @@ static int rswitch_open(struct net_device *ndev)
static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ struct sk_buff *ts_skb;
unsigned long flags;
+ unsigned int tag;
netif_tx_stop_all_queues(ndev);
@@ -1594,12 +1600,13 @@ static int rswitch_stop(struct net_device *ndev)
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
- list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
- if (ts_info->port != rdev->port)
- continue;
- dev_kfree_skb_irq(ts_info->skb);
- list_del(&ts_info->list);
- kfree(ts_info);
+ for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
+ tag < TS_TAGS_PER_PORT;
+ tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
+ ts_skb = xchg(&rdev->ts_skb[tag], NULL);
+ clear_bit(tag, rdev->ts_skb_used);
+ if (ts_skb)
+ dev_kfree_skb(ts_skb);
}
return 0;
@@ -1612,20 +1619,17 @@ static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) |
INFO1_IPV(GWCA_IPV_NUM) | INFO1_FMT);
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- struct rswitch_gwca_ts_info *ts_info;
+ unsigned int tag;
- ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
- if (!ts_info)
+ tag = find_first_zero_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
+ if (tag == TS_TAGS_PER_PORT)
return false;
+ smp_mb(); /* order bitmap read before rdev->ts_skb[] write */
+ rdev->ts_skb[tag] = skb_get(skb);
+ set_bit(tag, rdev->ts_skb_used);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- rdev->ts_tag++;
- desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
-
- ts_info->skb = skb_get(skb);
- ts_info->port = rdev->port;
- ts_info->tag = rdev->ts_tag;
- list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
+ desc->info1 |= cpu_to_le64(INFO1_TSUN(tag) | INFO1_TXC);
skb_tx_timestamp(skb);
}
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index e020800dcc57..d8d4ed7d7f8b 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -972,14 +972,6 @@ struct rswitch_gwca_queue {
};
};
-struct rswitch_gwca_ts_info {
- struct sk_buff *skb;
- struct list_head list;
-
- int port;
- u8 tag;
-};
-
#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
struct rswitch_gwca {
unsigned int index;
@@ -989,7 +981,6 @@ struct rswitch_gwca {
struct rswitch_gwca_queue *queues;
int num_queues;
struct rswitch_gwca_queue ts_queue;
- struct list_head ts_info_list;
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
@@ -997,6 +988,7 @@ struct rswitch_gwca {
};
#define NUM_QUEUES_PER_NDEV 2
+#define TS_TAGS_PER_PORT 256
struct rswitch_device {
struct rswitch_private *priv;
struct net_device *ndev;
@@ -1004,7 +996,8 @@ struct rswitch_device {
void __iomem *addr;
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
- u8 ts_tag;
+ struct sk_buff *ts_skb[TS_TAGS_PER_PORT];
+ DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT);
bool disabled;
int port;
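
The rswitch rework above replaces the allocated ts_info list with a fixed per-port array of skb pointers indexed by the timestamp tag plus a bitmap of tags in use; xchg() and the explicit barriers keep the xmit path and the timestamp interrupt from racing on a slot. A minimal sketch of that claim/complete pattern, using illustrative struct and function names rather than the driver's own:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#define TS_TAGS 256

struct ts_slots {
	struct sk_buff *skb[TS_TAGS];
	DECLARE_BITMAP(used, TS_TAGS);
};

/* xmit side: reserve a free tag and stash the skb */
static int ts_slot_claim(struct ts_slots *s, struct sk_buff *skb)
{
	unsigned int tag = find_first_zero_bit(s->used, TS_TAGS);

	if (tag == TS_TAGS)
		return -EBUSY;
	smp_mb();			/* bitmap read before skb[] write */
	s->skb[tag] = skb_get(skb);
	set_bit(tag, s->used);
	return tag;
}

/* completion side: atomically take the skb, then release the tag */
static struct sk_buff *ts_slot_complete(struct ts_slots *s, unsigned int tag)
{
	struct sk_buff *skb = xchg(&s->skb[tag], NULL);

	smp_mb();			/* skb[] read before bitmap update */
	clear_bit(tag, s->used);
	return skb;			/* may be NULL if already reclaimed */
}
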
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
index d90206f27161..c0603f54cec3 100644
--- a/drivers/net/ethernet/sfc/tc_conntrack.c
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
void *cb_priv);
static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
- .key_len = offsetof(struct efx_tc_ct_zone, linkage),
+ .key_len = sizeof_field(struct efx_tc_ct_zone, zone),
.key_offset = 0,
.head_offset = offsetof(struct efx_tc_ct_zone, linkage),
};
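
The sfc fix narrows the rhashtable key from everything before the linkage member down to the zone field alone: with the old offsetof()-based key_len, neighbouring fields were hashed and compared as part of the key, so logically equal entries could fail to match. A short sketch of the corrected parameter layout for a structure of this general shape (the struct below is a stand-in, not the sfc one):

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/rhashtable.h>

struct ct_zone_entry {
	u16 zone;			/* the lookup key */
	struct rhash_head linkage;	/* hash table linkage */
	/* other per-entry state that must not be part of the key */
};

static const struct rhashtable_params ct_zone_params = {
	.key_len = sizeof_field(struct ct_zone_entry, zone),
	.key_offset = offsetof(struct ct_zone_entry, zone),
	.head_offset = offsetof(struct ct_zone_entry, linkage),
};
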
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3ac32444e492..dc9884130b91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -406,22 +406,6 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
}
/**
- * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
- * @pdev: platform_device structure
- * @plat: driver data platform structure
- *
- * Release resources claimed by stmmac_probe_config_dt().
- */
-static void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
-{
- clk_disable_unprepare(plat->stmmac_clk);
- clk_disable_unprepare(plat->pclk);
- of_node_put(plat->phy_node);
- of_node_put(plat->mdio_node);
-}
-
-/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
* @mac: MAC address to use
@@ -490,8 +474,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
rc = stmmac_mdio_setup(plat, np, &pdev->dev);
- if (rc)
- return ERR_PTR(rc);
+ if (rc) {
+ ret = ERR_PTR(rc);
+ goto error_put_phy;
+ }
of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
@@ -581,8 +567,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(-ENOMEM);
+ ret = ERR_PTR(-ENOMEM);
+ goto error_put_mdio;
}
plat->dma_cfg = dma_cfg;
@@ -610,8 +596,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
rc = stmmac_mtl_setup(pdev, plat);
if (rc) {
- stmmac_remove_config_dt(pdev, plat);
- return ERR_PTR(rc);
+ ret = ERR_PTR(rc);
+ goto error_put_mdio;
}
/* clock setup */
@@ -663,6 +649,10 @@ error_hw_init:
clk_disable_unprepare(plat->pclk);
error_pclk_get:
clk_disable_unprepare(plat->stmmac_clk);
+error_put_mdio:
+ of_node_put(plat->mdio_node);
+error_put_phy:
+ of_node_put(plat->phy_node);
return ret;
}
@@ -671,16 +661,17 @@ static void devm_stmmac_remove_config_dt(void *data)
{
struct plat_stmmacenet_data *plat = data;
- /* Platform data argument is unused */
- stmmac_remove_config_dt(NULL, plat);
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
+ of_node_put(plat->mdio_node);
+ of_node_put(plat->phy_node);
}
/**
* devm_stmmac_probe_config_dt
* @pdev: platform_device structure
* @mac: MAC address to use
- * Description: Devres variant of stmmac_probe_config_dt(). Does not require
- * the user to call stmmac_remove_config_dt() at driver detach.
+ * Description: Devres variant of stmmac_probe_config_dt().
*/
struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
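
The stmmac change retires the standalone remove helper and instead releases the OF node references taken during parsing through dedicated error labels, so phy_node and mdio_node are dropped on every failure path and, via the devres callback, on detach. A compressed sketch of that unwind shape; setup_mdio() and allocate_and_fill() below are placeholders for the real parsing steps:

#include <linux/of.h>
#include <linux/err.h>

static int setup_mdio(struct device_node *np, struct device_node **mdio_node);	/* placeholder */
static void *allocate_and_fill(struct device_node *np);				/* placeholder */

static void *parse_config(struct device_node *np)
{
	struct device_node *phy_node, *mdio_node = NULL;
	void *cfg;
	int rc;

	phy_node = of_parse_phandle(np, "phy-handle", 0);	/* takes a reference */

	rc = setup_mdio(np, &mdio_node);
	if (rc) {
		cfg = ERR_PTR(rc);
		goto err_put_phy;
	}

	cfg = allocate_and_fill(np);
	if (IS_ERR(cfg))
		goto err_put_mdio;

	return cfg;		/* references stay held; dropped later at detach */

err_put_mdio:
	of_node_put(mdio_node);
err_put_phy:
	of_node_put(phy_node);
	return cfg;
}
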
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 14e1df721f2e..5465bf872734 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -3551,7 +3551,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
init_completion(&common->tdown_complete);
common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
- common->pf_p0_rx_ptype_rrobin = false;
+ common->pf_p0_rx_ptype_rrobin = true;
common->default_vlan = 1;
common->ports = devm_kcalloc(dev, common->port_num,
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 5d6d1cf78e93..768578c0d958 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -215,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(cmp), 0);
}
/* enable reset counter on CMP0 event */
@@ -780,6 +783,11 @@ int icss_iep_exit(struct icss_iep *iep)
}
icss_iep_disable(iep);
+ if (iep->pps_enabled)
+ icss_iep_pps_enable(iep, false);
+ else if (iep->perout_enabled)
+ icss_iep_perout_enable(iep, NULL, false);
+
return 0;
}
EXPORT_SYMBOL_GPL(icss_iep_exit);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index fdebeb2f84e0..74f0f200a89d 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -855,31 +855,6 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);
-void prueth_emac_stop(struct prueth_emac *emac)
-{
- struct prueth *prueth = emac->prueth;
- int slice;
-
- switch (emac->port_id) {
- case PRUETH_PORT_MII0:
- slice = ICSS_SLICE0;
- break;
- case PRUETH_PORT_MII1:
- slice = ICSS_SLICE1;
- break;
- default:
- netdev_err(emac->ndev, "invalid port\n");
- return;
- }
-
- emac->fw_running = 0;
- if (!emac->is_sr1)
- rproc_shutdown(prueth->txpru[slice]);
- rproc_shutdown(prueth->rtu[slice]);
- rproc_shutdown(prueth->pru[slice]);
-}
-EXPORT_SYMBOL_GPL(prueth_emac_stop);
-
void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
int i;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 5d2491c2943a..ddfd1c02a885 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -397,7 +397,7 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
return 0;
}
-static void icssg_init_emac_mode(struct prueth *prueth)
+void icssg_init_emac_mode(struct prueth *prueth)
{
/* When the device is configured as a bridge and it is being brought
* back to the emac mode, the host mac address has to be set as 0.
@@ -406,9 +406,6 @@ static void icssg_init_emac_mode(struct prueth *prueth)
int i;
u8 mac[ETH_ALEN] = { 0 };
- if (prueth->emacs_initialized)
- return;
-
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
@@ -423,15 +420,13 @@ static void icssg_init_emac_mode(struct prueth *prueth)
/* Clear host MAC address */
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
+EXPORT_SYMBOL_GPL(icssg_init_emac_mode);
-static void icssg_init_fw_offload_mode(struct prueth *prueth)
+void icssg_init_fw_offload_mode(struct prueth *prueth)
{
u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
int i;
- if (prueth->emacs_initialized)
- return;
-
/* Set VLAN TABLE address base */
regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
addr << SMEM_VLAN_OFFSET);
@@ -448,6 +443,7 @@ static void icssg_init_fw_offload_mode(struct prueth *prueth)
icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
}
+EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
@@ -455,11 +451,6 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
struct icssg_flow_cfg __iomem *flow_cfg;
int ret;
- if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
- icssg_init_fw_offload_mode(prueth);
- else
- icssg_init_emac_mode(prueth);
-
memset_io(config, 0, TAS_GATE_MASK_LIST0);
icssg_miig_queues_init(prueth, slice);
@@ -786,3 +777,27 @@ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
}
EXPORT_SYMBOL_GPL(icssg_set_pvid);
+
+int emac_fdb_flow_id_updated(struct prueth_emac *emac)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ int slice = prueth_emac_slice(emac);
+ struct mgmt_cmd fdb_cmd = { 0 };
+ int ret;
+
+ fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
+ fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
+ fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
+ fdb_cmd.param = 0;
+
+ fdb_cmd.param |= (slice << 4);
+ fdb_cmd.cmd_args[0] = 0;
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+ return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index 92c2deaa3068..c884e9fa099e 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -55,6 +55,7 @@ struct icssg_rxq_ctx {
#define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03
#define ICSSG_FW_MGMT_CMD_TYPE 0x04
#define ICSSG_FW_MGMT_PKT 0x80000000
+#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW 0x05
struct icssg_r30_cmd {
u32 cmd[4];
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index c568c84a032b..d76fe6d05e10 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -164,11 +164,26 @@ static struct icssg_firmwares icssg_emac_firmwares[] = {
}
};
-static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+static int prueth_start(struct rproc *rproc, const char *fw_name)
+{
+ int ret;
+
+ ret = rproc_set_firmware(rproc, fw_name);
+ if (ret)
+ return ret;
+ return rproc_boot(rproc);
+}
+
+static void prueth_shutdown(struct rproc *rproc)
+{
+ rproc_shutdown(rproc);
+}
+
+static int prueth_emac_start(struct prueth *prueth)
{
struct icssg_firmwares *firmwares;
struct device *dev = prueth->dev;
- int slice, ret;
+ int ret, slice;
if (prueth->is_switch_mode)
firmwares = icssg_switch_firmwares;
@@ -177,49 +192,126 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
else
firmwares = icssg_emac_firmwares;
- slice = prueth_emac_slice(emac);
- if (slice < 0) {
- netdev_err(emac->ndev, "invalid port\n");
- return -EINVAL;
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
+ if (ret) {
+ dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+ goto unwind_slices;
+ }
+
+ ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
+ if (ret) {
+ dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+ rproc_shutdown(prueth->pru[slice]);
+ goto unwind_slices;
+ }
+
+ ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
+ if (ret) {
+ dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+ goto unwind_slices;
+ }
}
- ret = icssg_config(prueth, emac, slice);
- if (ret)
- return ret;
+ return 0;
- ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
- ret = rproc_boot(prueth->pru[slice]);
- if (ret) {
- dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
- return -EINVAL;
+unwind_slices:
+ while (--slice >= 0) {
+ prueth_shutdown(prueth->txpru[slice]);
+ prueth_shutdown(prueth->rtu[slice]);
+ prueth_shutdown(prueth->pru[slice]);
}
- ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
- ret = rproc_boot(prueth->rtu[slice]);
- if (ret) {
- dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
- goto halt_pru;
+ return ret;
+}
+
+static void prueth_emac_stop(struct prueth *prueth)
+{
+ int slice;
+
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ prueth_shutdown(prueth->txpru[slice]);
+ prueth_shutdown(prueth->rtu[slice]);
+ prueth_shutdown(prueth->pru[slice]);
}
+}
+
+static int prueth_emac_common_start(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+ int ret = 0;
+ int slice;
+
+ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
+ return -EINVAL;
+
+ /* clear SMEM and MSMC settings for all slices */
+ memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+ memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+
+ icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
+ icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
+
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+ icssg_init_fw_offload_mode(prueth);
+ else
+ icssg_init_emac_mode(prueth);
+
+ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
+ emac = prueth->emac[slice];
+ if (!emac)
+ continue;
+ ret = icssg_config(prueth, emac, slice);
+ if (ret)
+ goto disable_class;
+ }
+
+ ret = prueth_emac_start(prueth);
+ if (ret)
+ goto disable_class;
- ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
- ret = rproc_boot(prueth->txpru[slice]);
+ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
+ prueth->emac[ICSS_SLICE1];
+ ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+ emac, IEP_DEFAULT_CYCLE_TIME_NS);
if (ret) {
- dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
- goto halt_rtu;
+ dev_err(prueth->dev, "Failed to initialize IEP module\n");
+ goto stop_pruss;
}
- emac->fw_running = 1;
return 0;
-halt_rtu:
- rproc_shutdown(prueth->rtu[slice]);
+stop_pruss:
+ prueth_emac_stop(prueth);
-halt_pru:
- rproc_shutdown(prueth->pru[slice]);
+disable_class:
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
return ret;
}
+static int prueth_emac_common_stop(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+
+ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
+ return -EINVAL;
+
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
+ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
+
+ prueth_emac_stop(prueth);
+
+ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
+ prueth->emac[ICSS_SLICE1];
+ icss_iep_exit(emac->iep);
+
+ return 0;
+}
+
/* called back by PHY layer if there is a change in link state of hw port */
static void emac_adjust_link(struct net_device *ndev)
{
@@ -374,9 +466,6 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
u32 cycletime;
int timeout;
- if (!emac->fw_running)
- return;
-
sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
@@ -543,23 +632,17 @@ static int emac_ndo_open(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
int ret, i, num_data_chn = emac->tx_ch_num;
+ struct icssg_flow_cfg __iomem *flow_cfg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
struct device *dev = prueth->dev;
int max_rx_flows;
int rx_flow;
- /* clear SMEM and MSMC settings for all slices */
- if (!prueth->emacs_initialized) {
- memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
- memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
- }
-
/* set h/w MAC as user might have re-configured */
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
- icssg_class_default(prueth->miig_rt, slice, 0, false);
icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
/* Notify the stack of the actual queue counts. */
@@ -597,18 +680,23 @@ static int emac_ndo_open(struct net_device *ndev)
goto cleanup_napi;
}
- /* reset and start PRU firmware */
- ret = prueth_emac_start(prueth, emac);
- if (ret)
- goto free_rx_irq;
+ if (!prueth->emacs_initialized) {
+ ret = prueth_emac_common_start(prueth);
+ if (ret)
+ goto free_rx_irq;
+ }
- icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+ flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+ writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
+ ret = emac_fdb_flow_id_updated(emac);
- if (!prueth->emacs_initialized) {
- ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
- emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ if (ret) {
+ netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
+ goto stop;
}
+ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
IRQF_ONESHOT, dev_name(dev), emac);
if (ret)
@@ -653,7 +741,8 @@ reset_rx_chn:
free_tx_ts_irq:
free_irq(emac->tx_ts_irq, emac);
stop:
- prueth_emac_stop(emac);
+ if (!prueth->emacs_initialized)
+ prueth_emac_common_stop(prueth);
free_rx_irq:
free_irq(emac->rx_chns.irq[rx_flow], emac);
cleanup_napi:
@@ -689,8 +778,6 @@ static int emac_ndo_stop(struct net_device *ndev)
if (ndev->phydev)
phy_stop(ndev->phydev);
- icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
-
if (emac->prueth->is_hsr_offload_mode)
__dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
else
@@ -728,11 +815,9 @@ static int emac_ndo_stop(struct net_device *ndev)
/* Destroying the queued work in ndo_stop() */
cancel_delayed_work_sync(&emac->stats_work);
- if (prueth->emacs_initialized == 1)
- icss_iep_exit(emac->iep);
-
/* stop PRUs */
- prueth_emac_stop(emac);
+ if (prueth->emacs_initialized == 1)
+ prueth_emac_common_stop(prueth);
free_irq(emac->tx_ts_irq, emac);
@@ -1053,10 +1138,11 @@ static void prueth_offload_fwd_mark_update(struct prueth *prueth)
}
}
-static void prueth_emac_restart(struct prueth *prueth)
+static int prueth_emac_restart(struct prueth *prueth)
{
struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
+ int ret;
/* Detach the net_device for both PRUeth ports */
if (netif_running(emac0->ndev))
@@ -1065,36 +1151,46 @@ static void prueth_emac_restart(struct prueth *prueth)
netif_device_detach(emac1->ndev);
/* Disable both PRUeth ports */
- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
+ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
/* Stop both PRU cores for both PRUeth ports */
- prueth_emac_stop(emac0);
- prueth->emacs_initialized--;
- prueth_emac_stop(emac1);
- prueth->emacs_initialized--;
+ ret = prueth_emac_common_stop(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to stop the firmwares");
+ return ret;
+ }
/* Start both pru cores for both PRUeth ports */
- prueth_emac_start(prueth, emac0);
- prueth->emacs_initialized++;
- prueth_emac_start(prueth, emac1);
- prueth->emacs_initialized++;
+ ret = prueth_emac_common_start(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to start the firmwares");
+ return ret;
+ }
/* Enable forwarding for both PRUeth ports */
- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
+ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
+ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
/* Attach net_device for both PRUeth ports */
netif_device_attach(emac0->ndev);
netif_device_attach(emac1->ndev);
+
+ return ret;
}
static void icssg_change_mode(struct prueth *prueth)
{
struct prueth_emac *emac;
- int mac;
+ int mac, ret;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
emac = prueth->emac[mac];
@@ -1173,13 +1269,18 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
+ int ret;
prueth->br_members &= ~BIT(emac->port_id);
if (prueth->is_switch_mode) {
prueth->is_switch_mode = false;
emac->port_vlan = 0;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
}
prueth_offload_fwd_mark_update(prueth);
@@ -1228,6 +1329,7 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
struct prueth *prueth = emac->prueth;
struct prueth_emac *emac0;
struct prueth_emac *emac1;
+ int ret;
emac0 = prueth->emac[PRUETH_MAC0];
emac1 = prueth->emac[PRUETH_MAC1];
@@ -1238,7 +1340,11 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
emac0->port_vlan = 0;
emac1->port_vlan = 0;
prueth->hsr_dev = NULL;
- prueth_emac_restart(prueth);
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
+ return;
+ }
netdev_dbg(ndev, "Disabling HSR Offload mode\n");
}
}
@@ -1413,13 +1519,10 @@ static int prueth_probe(struct platform_device *pdev)
prueth->pa_stats = NULL;
}
- if (eth0_node) {
+ if (eth0_node || eth1_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
if (ret)
goto put_cores;
- }
-
- if (eth1_node) {
ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
if (ret)
goto put_cores;
@@ -1618,14 +1721,12 @@ put_pruss:
pruss_put(prueth->pruss);
put_cores:
- if (eth1_node) {
- prueth_put_cores(prueth, ICSS_SLICE1);
- of_node_put(eth1_node);
- }
-
- if (eth0_node) {
+ if (eth0_node || eth1_node) {
prueth_put_cores(prueth, ICSS_SLICE0);
of_node_put(eth0_node);
+
+ prueth_put_cores(prueth, ICSS_SLICE1);
+ of_node_put(eth1_node);
}
return ret;
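
The prueth rework above consolidates firmware boot, IEP setup and their teardown into prueth_emac_common_start()/_common_stop(), gated by the existing emacs_initialized counter: the first port to open brings the shared PRU cores up, the last one to close takes them down. The skeleton of that first-open/last-close pattern, stripped of locking and of the real start/stop bodies (struct and helper names below are illustrative):

struct shared_ctx {
	int users;			/* open ports sharing the firmware */
};

static int common_start(struct shared_ctx *ctx);	/* placeholder: boot firmware, init IEP */
static void common_stop(struct shared_ctx *ctx);	/* placeholder: stop firmware, exit IEP */

static int port_open(struct shared_ctx *ctx)
{
	if (!ctx->users) {		/* first port opening */
		int ret = common_start(ctx);

		if (ret)
			return ret;
	}
	ctx->users++;
	return 0;
}

static void port_close(struct shared_ctx *ctx)
{
	if (ctx->users == 1)		/* last open port closing */
		common_stop(ctx);
	ctx->users--;
}
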
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index f5c1d473e9f9..5473315ea204 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -140,7 +140,6 @@ struct prueth_rx_chn {
/* data for each emac port */
struct prueth_emac {
bool is_sr1;
- bool fw_running;
struct prueth *prueth;
struct net_device *ndev;
u8 mac_addr[6];
@@ -361,6 +360,8 @@ int icssg_set_port_state(struct prueth_emac *emac,
enum icssg_port_state_cmd state);
void icssg_config_set_speed(struct prueth_emac *emac);
void icssg_config_half_duplex(struct prueth_emac *emac);
+void icssg_init_emac_mode(struct prueth *prueth);
+void icssg_init_fw_offload_mode(struct prueth *prueth);
/* Buffer queue helpers */
int icssg_queue_pop(struct prueth *prueth, u8 queue);
@@ -377,6 +378,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
u8 untag_mask, bool add);
u16 icssg_get_pvid(struct prueth_emac *emac);
void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
+int emac_fdb_flow_id_updated(struct prueth_emac *emac);
#define prueth_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct prueth_tx_chn, napi_tx)
@@ -407,7 +409,6 @@ void emac_rx_timestamp(struct prueth_emac *emac,
struct sk_buff *skb, u32 *psdata);
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
irqreturn_t prueth_rx_irq(int irq, void *dev_id);
-void prueth_emac_stop(struct prueth_emac *emac);
void prueth_cleanup_tx_ts(struct prueth_emac *emac);
int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
int prueth_prepare_rx_chan(struct prueth_emac *emac,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 5024f0647a0d..3dc86397c367 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -440,7 +440,6 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
goto halt_pru;
}
- emac->fw_running = 1;
return 0;
halt_pru:
@@ -449,6 +448,29 @@ halt_pru:
return ret;
}
+static void prueth_emac_stop(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice;
+
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ slice = ICSS_SLICE0;
+ break;
+ case PRUETH_PORT_MII1:
+ slice = ICSS_SLICE1;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return;
+ }
+
+ if (!emac->is_sr1)
+ rproc_shutdown(prueth->txpru[slice]);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index b156493d7084..aea0f0357568 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -40,6 +40,7 @@ fwnode_find_pse_control(struct fwnode_handle *fwnode)
static struct mii_timestamper *
fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
{
+ struct mii_timestamper *mii_ts;
struct of_phandle_args arg;
int err;
@@ -53,10 +54,16 @@ fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
else if (err)
return ERR_PTR(err);
- if (arg.args_count != 1)
- return ERR_PTR(-EINVAL);
+ if (arg.args_count != 1) {
+ mii_ts = ERR_PTR(-EINVAL);
+ goto put_node;
+ }
+
+ mii_ts = register_mii_timestamper(arg.np, arg.args[0]);
- return register_mii_timestamper(arg.np, arg.args[0]);
+put_node:
+ of_node_put(arg.np);
+ return mii_ts;
}
int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
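
The fwnode_mdio fix is purely about reference counting: of_parse_phandle_with_fixed_args() returns arg.np with its refcount raised, so every exit path, the argument-count check as well as the success path once the timestamper has been registered, must drop it. The same structure in miniature; consume_target() and the property name are placeholders:

#include <linux/of.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

static void *consume_target(struct device_node *np, u32 arg);	/* placeholder */

static void *lookup_via_phandle(struct device_node *np)
{
	struct of_phandle_args arg;
	void *res;
	int err;

	err = of_parse_phandle_with_fixed_args(np, "example-phandle", 1, 0, &arg);
	if (err)
		return ERR_PTR(err);

	if (arg.args_count != 1) {
		res = ERR_PTR(-EINVAL);
		goto put_node;
	}

	res = consume_target(arg.np, arg.args[0]);

put_node:
	of_node_put(arg.np);	/* the parse took a reference; drop it on every path */
	return res;
}
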
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 70e8bdf34be9..688f05316b5e 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -149,6 +149,8 @@ static ssize_t nsim_dev_health_break_write(struct file *file,
char *break_msg;
int err;
+ if (count == 0 || count > PAGE_SIZE)
+ return -EINVAL;
break_msg = memdup_user_nul(data, count);
if (IS_ERR(break_msg))
return PTR_ERR(break_msg);
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 0be47fed4efc..e068a9761c09 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -635,10 +635,10 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
page_pool_put_full_page(ns->page->pp, ns->page, false);
ns->page = NULL;
}
- rtnl_unlock();
exit:
- return count;
+ rtnl_unlock();
+ return ret;
}
static const struct file_operations nsim_pp_hold_fops = {
diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c
index 00ad2313fed3..951f46104eff 100644
--- a/drivers/net/phy/aquantia/aquantia_leds.c
+++ b/drivers/net/phy/aquantia/aquantia_leds.c
@@ -156,5 +156,5 @@ int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long
if (force_active_high || force_active_low)
return aqr_phy_led_active_low_set(phydev, index, force_active_low);
- unreachable();
+ return -EINVAL;
}
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index b672c55a7a4e..e6ed2413e514 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -529,7 +529,7 @@ static int xway_gphy_led_polarity_set(struct phy_device *phydev, int index,
if (force_active_high)
return phy_clear_bits(phydev, XWAY_MDIO_LED, XWAY_GPHY_LED_INV(index));
- unreachable();
+ return -EINVAL;
}
static struct phy_driver xway_gphy[] = {
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3ef508840674..eeb33eb181ac 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -432,10 +432,12 @@ struct kszphy_ptp_priv {
struct kszphy_priv {
struct kszphy_ptp_priv ptp_priv;
const struct kszphy_type *type;
+ struct clk *clk;
int led_mode;
u16 vct_ctrl1000;
bool rmii_ref_clk_sel;
bool rmii_ref_clk_sel_val;
+ bool clk_enable;
u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
};
@@ -2050,6 +2052,46 @@ static void kszphy_get_stats(struct phy_device *phydev,
data[i] = kszphy_get_stat(phydev, i);
}
+static void kszphy_enable_clk(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+
+ if (!priv->clk_enable && priv->clk) {
+ clk_prepare_enable(priv->clk);
+ priv->clk_enable = true;
+ }
+}
+
+static void kszphy_disable_clk(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+
+ if (priv->clk_enable && priv->clk) {
+ clk_disable_unprepare(priv->clk);
+ priv->clk_enable = false;
+ }
+}
+
+static int kszphy_generic_resume(struct phy_device *phydev)
+{
+ kszphy_enable_clk(phydev);
+
+ return genphy_resume(phydev);
+}
+
+static int kszphy_generic_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_suspend(phydev);
+ if (ret)
+ return ret;
+
+ kszphy_disable_clk(phydev);
+
+ return 0;
+}
+
static int kszphy_suspend(struct phy_device *phydev)
{
/* Disable PHY Interrupts */
@@ -2059,7 +2101,7 @@ static int kszphy_suspend(struct phy_device *phydev)
phydev->drv->config_intr(phydev);
}
- return genphy_suspend(phydev);
+ return kszphy_generic_suspend(phydev);
}
static void kszphy_parse_led_mode(struct phy_device *phydev)
@@ -2090,7 +2132,9 @@ static int kszphy_resume(struct phy_device *phydev)
{
int ret;
- genphy_resume(phydev);
+ ret = kszphy_generic_resume(phydev);
+ if (ret)
+ return ret;
/* After switching from power-down to normal mode, an internal global
* reset is automatically generated. Wait a minimum of 1 ms before
@@ -2112,6 +2156,24 @@ static int kszphy_resume(struct phy_device *phydev)
return 0;
}
+/* Because of errata DS80000700A (receiver error following software
+ * power down), the suspend and resume callbacks only disable and
+ * enable the external RMII reference clock.
+ */
+static int ksz8041_resume(struct phy_device *phydev)
+{
+ kszphy_enable_clk(phydev);
+
+ return 0;
+}
+
+static int ksz8041_suspend(struct phy_device *phydev)
+{
+ kszphy_disable_clk(phydev);
+
+ return 0;
+}
+
static int ksz9477_resume(struct phy_device *phydev)
{
int ret;
@@ -2159,7 +2221,10 @@ static int ksz8061_resume(struct phy_device *phydev)
if (!(ret & BMCR_PDOWN))
return 0;
- genphy_resume(phydev);
+ ret = kszphy_generic_resume(phydev);
+ if (ret)
+ return ret;
+
usleep_range(1000, 2000);
/* Re-program the value after chip is reset. */
@@ -2177,6 +2242,11 @@ static int ksz8061_resume(struct phy_device *phydev)
return 0;
}
+static int ksz8061_suspend(struct phy_device *phydev)
+{
+ return kszphy_suspend(phydev);
+}
+
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -2217,10 +2287,14 @@ static int kszphy_probe(struct phy_device *phydev)
} else if (!clk) {
/* unnamed clock from the generic ethernet-phy binding */
clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
}
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk_disable_unprepare(clk);
+ priv->clk = clk;
+
if (ksz8041_fiber_mode(phydev))
phydev->port = PORT_FIBRE;
@@ -5290,6 +5364,21 @@ static int lan8841_probe(struct phy_device *phydev)
return 0;
}
+static int lan8804_resume(struct phy_device *phydev)
+{
+ return kszphy_resume(phydev);
+}
+
+static int lan8804_suspend(struct phy_device *phydev)
+{
+ return kszphy_generic_suspend(phydev);
+}
+
+static int lan8841_resume(struct phy_device *phydev)
+{
+ return kszphy_generic_resume(phydev);
+}
+
static int lan8841_suspend(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
@@ -5298,7 +5387,7 @@ static int lan8841_suspend(struct phy_device *phydev)
if (ptp_priv->ptp_clock)
ptp_cancel_worker_sync(ptp_priv->ptp_clock);
- return genphy_suspend(phydev);
+ return kszphy_generic_suspend(phydev);
}
static struct phy_driver ksphy_driver[] = {
@@ -5358,9 +5447,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- /* No suspend/resume callbacks because of errata DS80000700A,
- * receiver error following software power down.
- */
+ .suspend = ksz8041_suspend,
+ .resume = ksz8041_resume,
}, {
.phy_id = PHY_ID_KSZ8041RNLI,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -5436,7 +5524,7 @@ static struct phy_driver ksphy_driver[] = {
.soft_reset = genphy_soft_reset,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = kszphy_suspend,
+ .suspend = ksz8061_suspend,
.resume = ksz8061_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
@@ -5507,8 +5595,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = kszphy_resume,
+ .suspend = lan8804_suspend,
+ .resume = lan8804_resume,
.config_intr = lan8804_config_intr,
.handle_interrupt = lan8804_handle_interrupt,
}, {
@@ -5526,7 +5614,7 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.suspend = lan8841_suspend,
- .resume = genphy_resume,
+ .resume = lan8841_resume,
.cable_test_start = lan8814_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index db3c1f72b407..a8ccf257c109 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -1014,7 +1014,7 @@ static int gpy_led_polarity_set(struct phy_device *phydev, int index,
if (force_active_high)
return phy_clear_bits(phydev, PHY_LED, PHY_LED_POLARITY(index));
- unreachable();
+ return -EINVAL;
}
static struct phy_driver gpy_drivers[] = {
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 5c4e88be46ee..8797ca1a8a21 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -64,15 +64,11 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
- ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
- if (ret < 0)
- return ret;
-
chan = priv->port[id].chan[0];
if (chan < 4)
- val = (u16)(ret | BIT(chan));
+ val = BIT(chan);
else
- val = (u16)(ret | BIT(chan + 4));
+ val = BIT(chan + 4);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
@@ -100,15 +96,11 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
if (id >= TPS23881_MAX_CHANS)
return -ERANGE;
- ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
- if (ret < 0)
- return ret;
-
chan = priv->port[id].chan[0];
if (chan < 4)
- val = (u16)(ret | BIT(chan + 4));
+ val = BIT(chan + 4);
else
- val = (u16)(ret | BIT(chan + 8));
+ val = BIT(chan + 8);
if (priv->port[id].is_4p) {
chan = priv->port[id].chan[1];
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index 69ea2c3c76bf..c7690adec8db 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -998,9 +998,13 @@ static void __team_compute_features(struct team *team)
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
+ rcu_read_lock();
+ if (list_empty(&team->port_list))
+ goto done;
+
vlan_features = netdev_base_features(vlan_features);
+ enc_features = netdev_base_features(enc_features);
- rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
@@ -1010,11 +1014,11 @@ static void __team_compute_features(struct team *team)
port->dev->hw_enc_features,
TEAM_ENC_FEATURES);
-
dst_release_flag &= port->dev->priv_flags;
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
+done:
rcu_read_unlock();
team->dev->vlan_features = vlan_features;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d7a865ef370b..e816aaba8e5f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1481,7 +1481,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
skb->truesize += skb->data_len;
for (i = 1; i < it->nr_segs; i++) {
- const struct iovec *iov = iter_iov(it);
+ const struct iovec *iov = iter_iov(it) + i;
size_t fragsz = iov->iov_len;
struct page *page;
void *frag;
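
The tun fix is a one-line indexing bug: iter_iov() points at the first iovec of the iterator, so the per-segment loop must add the segment index or every fragment is sized from segment 0. A minimal sketch of walking the remaining segments the corrected way (the helper name is illustrative):

#include <linux/uio.h>

static size_t sum_tail_segments(const struct iov_iter *it)
{
	size_t total = 0;
	unsigned long i;

	for (i = 1; i < it->nr_segs; i++) {
		/* segment i, not the first segment every time */
		const struct iovec *iov = iter_iov(it) + i;

		total += iov->iov_len;
	}
	return total;
}
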
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9fe7f704a2f7..e9208a8d2bfa 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1429,6 +1429,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x030e, 4)}, /* Quectel EM05GV2 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0316, 3)}, /* Quectel RG255C */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0112, 0)}, /* Fibocom FG132 */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index cd1fe8490ae5..1c43f283ac4a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -161,6 +161,7 @@ const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
+const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 34c91deca57b..17721bb47e25 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -545,6 +545,7 @@ extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
extern const char iwl_fm_name[];
+extern const char iwl_wh_name[];
extern const char iwl_gl_name[];
extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index f85c01e04ebf..7d973546c9fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -2954,6 +2954,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
int idx)
{
int i;
+ int n_channels = 0;
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
@@ -2962,7 +2963,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
- match->channels[match->n_channels++] =
+ match->channels[n_channels++] =
mvm->nd_channels[i]->center_freq;
} else {
struct iwl_scan_offload_profile_match_v1 *matches =
@@ -2970,9 +2971,11 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
- match->channels[match->n_channels++] =
+ match->channels[n_channels++] =
mvm->nd_channels[i]->center_freq;
}
+ /* We may have ended up with fewer channels than we allocated. */
+ match->n_channels = n_channels;
}
/**
@@ -3053,6 +3056,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
GFP_KERNEL);
if (!net_detect || !n_matches)
goto out_report_nd;
+ net_detect->n_matches = n_matches;
+ n_matches = 0;
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
struct cfg80211_wowlan_nd_match *match;
@@ -3066,8 +3071,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
GFP_KERNEL);
if (!match)
goto out_report_nd;
+ match->n_channels = n_channels;
- net_detect->matches[net_detect->n_matches++] = match;
+ net_detect->matches[n_matches++] = match;
/* We inverted the order of the SSIDs in the scan
* request, so invert the index here.
@@ -3082,6 +3088,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
}
+ /* We may have fewer matches than we allocated. */
+ net_detect->n_matches = n_matches;
out_report_nd:
wakeup.net_detect = net_detect;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 805fb249a0c6..8fb2aa282242 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1106,19 +1106,54 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* Bz */
-/* FIXME: need to change the naming according to the actual CRF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_ax201_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_ax211_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_fm_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_wh_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_ax201_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_ax211_name),
+
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_fm_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_wh_name),
+
/* Ga (Gl) */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index 862964a8cc87..52386dfb5f4a 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -442,8 +442,8 @@ static void cw1200_spi_disconnect(struct spi_device *func)
cw1200_core_release(self->core);
self->core = NULL;
}
+ cw1200_spi_off(self, dev_get_platdata(&func->dev));
}
- cw1200_spi_off(self, dev_get_platdata(&func->dev));
}
static int __maybe_unused cw1200_spi_suspend(struct device *dev)
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
index 63eb08c43c05..6764c13530b9 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
@@ -104,7 +104,7 @@ struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
break;
msleep(20);
- } while (retries-- > 0);
+ } while (--retries > 0);
if (!retries) {
dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
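
The iosm change fixes a post- versus pre-decrement retry loop: with retries-- the counter bottoms out at -1 once all attempts are used, so the following if (!retries) test never triggers and the timeout path is silently skipped. The corrected shape in isolation; stage_ready() stands in for the real polled condition:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static bool stage_ready(void);		/* placeholder for the polled condition */

static int wait_for_stage(int retries)
{
	do {
		if (stage_ready())
			break;
		msleep(20);
	} while (--retries > 0);	/* pre-decrement: hits 0 exactly when exhausted */

	if (!retries)
		return -ETIMEDOUT;
	return 0;
}
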
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
index 3931c7a13f5a..cbdbb91e8381 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -104,14 +104,21 @@ void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
fsm_state_notify(ctl->md, state);
}
+static void fsm_release_command(struct kref *ref)
+{
+ struct t7xx_fsm_command *cmd = container_of(ref, typeof(*cmd), refcnt);
+
+ kfree(cmd);
+}
+
static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
- *cmd->ret = result;
- complete_all(cmd->done);
+ cmd->result = result;
+ complete_all(&cmd->done);
}
- kfree(cmd);
+ kref_put(&cmd->refcnt, fsm_release_command);
}
static void fsm_del_kf_event(struct t7xx_fsm_event *event)
@@ -475,7 +482,6 @@ static int fsm_main_thread(void *data)
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
- DECLARE_COMPLETION_ONSTACK(done);
struct t7xx_fsm_command *cmd;
unsigned long flags;
int ret;
@@ -487,11 +493,13 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
INIT_LIST_HEAD(&cmd->entry);
cmd->cmd_id = cmd_id;
cmd->flag = flag;
+ kref_init(&cmd->refcnt);
if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
- cmd->done = &done;
- cmd->ret = &ret;
+ init_completion(&cmd->done);
+ kref_get(&cmd->refcnt);
}
+ kref_get(&cmd->refcnt);
spin_lock_irqsave(&ctl->command_lock, flags);
list_add_tail(&cmd->entry, &ctl->command_queue);
spin_unlock_irqrestore(&ctl->command_lock, flags);
@@ -501,11 +509,11 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
unsigned long wait_ret;
- wait_ret = wait_for_completion_timeout(&done,
+ wait_ret = wait_for_completion_timeout(&cmd->done,
msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
- if (!wait_ret)
- return -ETIMEDOUT;
+ ret = wait_ret ? cmd->result : -ETIMEDOUT;
+ kref_put(&cmd->refcnt, fsm_release_command);
return ret;
}
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
index 7b0a9baf488c..6e0601bb752e 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -110,8 +110,9 @@ struct t7xx_fsm_command {
struct list_head entry;
enum t7xx_fsm_cmd_state cmd_id;
unsigned int flag;
- struct completion *done;
- int *ret;
+ struct completion done;
+ int result;
+ struct kref refcnt;
};
struct t7xx_fsm_notifier {
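
The t7xx rework replaces the on-stack completion, which the worker could still complete (and dereference) after a timed-out submitter had returned, with a heap-allocated command whose lifetime is shared through a kref: submitter and worker each hold a reference, and the object is freed only by the last kref_put(). A condensed sketch of that ownership scheme, with queueing to the worker elided and illustrative names throughout:

#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct shared_cmd {			/* must come from kmalloc(); freed by the last kref_put() */
	struct completion done;
	int result;
	struct kref refcnt;
};

static void shared_cmd_release(struct kref *ref)
{
	kfree(container_of(ref, struct shared_cmd, refcnt));
}

/* worker side: report the result, then drop the worker's reference */
static void shared_cmd_finish(struct shared_cmd *cmd, int result)
{
	cmd->result = result;
	complete_all(&cmd->done);
	kref_put(&cmd->refcnt, shared_cmd_release);
}

/* submitter side: one reference for the worker, one kept for the wait */
static int shared_cmd_submit_and_wait(struct shared_cmd *cmd, unsigned long timeout_ms)
{
	int ret;

	kref_init(&cmd->refcnt);	/* submitter's reference */
	init_completion(&cmd->done);
	kref_get(&cmd->refcnt);		/* worker's reference */
	/* ... hand cmd to the worker here ... */

	ret = wait_for_completion_timeout(&cmd->done, msecs_to_jiffies(timeout_ms))
		? cmd->result : -ETIMEDOUT;
	kref_put(&cmd->refcnt, shared_cmd_release);
	return ret;
}
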
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 4265c1cd0ff7..63fe51d0e64d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -867,7 +867,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
static int xennet_close(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
- unsigned int num_queues = dev->real_num_tx_queues;
+ unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
unsigned int i;
struct netfront_queue *queue;
netif_tx_stop_all_queues(np->netdev);
@@ -882,6 +882,9 @@ static void xennet_destroy_queues(struct netfront_info *info)
{
unsigned int i;
+ if (!info->queues)
+ return;
+
for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
struct netfront_queue *queue = &info->queues[i];
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d169a30eb935..a970168a3014 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2034,7 +2034,7 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
* or smaller than a sector size yet, so catch this early and don't
* allow block I/O.
*/
- if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) {
+ if (blk_validate_block_size(bs)) {
bs = (1 << 9);
valid = false;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 611b02c8a8b3..c4bb8dfe1a45 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -173,6 +173,11 @@ enum nvme_quirks {
* MSI (but not MSI-X) interrupts are broken and never fire.
*/
NVME_QUIRK_BROKEN_MSI = (1 << 21),
+
+ /*
+ * Align dma pool segment size to 512 bytes
+ */
+ NVME_QUIRK_DMAPOOL_ALIGN_512 = (1 << 22),
};
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1a5ba80f1811..e2634f437f33 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2834,15 +2834,20 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
static int nvme_setup_prp_pools(struct nvme_dev *dev)
{
+ size_t small_align = 256;
+
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
NVME_CTRL_PAGE_SIZE,
NVME_CTRL_PAGE_SIZE, 0);
if (!dev->prp_page_pool)
return -ENOMEM;
+ if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
+ small_align = 512;
+
/* Optimisation for I/Os between 4k and 128k */
dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
- 256, 256, 0);
+ 256, small_align, 0);
if (!dev->prp_small_pool) {
dma_pool_destroy(dev->prp_page_pool);
return -ENOMEM;
@@ -3607,7 +3612,7 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
- .driver_data = NVME_QUIRK_QDEPTH_ONE },
+ .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
{ PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_BOGUS_NID, },
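
The nvme-pci change keeps the small PRP pool's element size at 256 bytes and only raises the alignment to 512 when the new quirk is set, for controllers that mishandle PRP lists crossing a 512-byte boundary. In dma_pool_create() the element size, alignment and boundary are the third, fourth and fifth arguments, so only the fourth changes; a small sketch of the same decision (helper name is illustrative):

#include <linux/dmapool.h>

/* 256-byte elements; alignment raised to 512 only for quirky controllers. */
static struct dma_pool *create_small_prp_pool(struct device *dev, bool align_512)
{
	size_t align = align_512 ? 512 : 256;

	return dma_pool_create("prp list 256", dev, 256, align, 0);
}
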
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 28c76a3e1bd2..b127d41dbbfe 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2024,14 +2024,6 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
return __nvme_tcp_alloc_io_queues(ctrl);
}
-static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
-{
- nvme_tcp_stop_io_queues(ctrl);
- if (remove)
- nvme_remove_io_tag_set(ctrl);
- nvme_tcp_free_io_queues(ctrl);
-}
-
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
int ret, nr_queues;
@@ -2176,9 +2168,11 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
nvme_cancel_tagset(ctrl);
- if (remove)
+ if (remove) {
nvme_unquiesce_io_queues(ctrl);
- nvme_tcp_destroy_io_queues(ctrl, remove);
+ nvme_remove_io_tag_set(ctrl);
+ }
+ nvme_tcp_free_io_queues(ctrl);
}
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
@@ -2267,7 +2261,9 @@ destroy_io:
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
nvme_cancel_tagset(ctrl);
- nvme_tcp_destroy_io_queues(ctrl, new);
+ if (new)
+ nvme_remove_io_tag_set(ctrl);
+ nvme_tcp_free_io_queues(ctrl);
}
destroy_admin:
nvme_stop_keep_alive(ctrl);
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 2962794ce881..fa89b0549c36 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -139,7 +139,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
unsigned long idx;
ctrl = req->sq->ctrl;
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
/* we don't have the right data for file backed ns */
if (!ns->bdev)
continue;
@@ -331,9 +331,10 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
u32 count = 0;
if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
- xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->anagrpid == grpid)
desc->nsids[count++] = cpu_to_le32(ns->nsid);
+ }
}
desc->grpid = cpu_to_le32(grpid);
@@ -772,7 +773,7 @@ static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
goto out;
}
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_endgid)
continue;
@@ -815,7 +816,7 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
goto out;
}
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->nsid <= min_nsid)
continue;
if (match_css && req->ns->csi != req->cmd->identify.csi)
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index eeee9e9b854c..2b030f0efc38 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -810,18 +810,6 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
NULL,
};
-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
-{
- struct config_item *ns_item;
- char name[12];
-
- snprintf(name, sizeof(name), "%u", nsid);
- mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
- ns_item = config_group_find_item(&subsys->namespaces_group, name);
- mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
- return ns_item != NULL;
-}
-
static void nvmet_ns_release(struct config_item *item)
{
struct nvmet_ns *ns = to_nvmet_ns(item);
@@ -2254,12 +2242,17 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
const char *page, size_t count)
{
struct list_head *entry;
+ char *old_nqn, *new_nqn;
size_t len;
len = strcspn(page, "\n");
if (!len || len > NVMF_NQN_FIELD_LEN - 1)
return -EINVAL;
+ new_nqn = kstrndup(page, len, GFP_KERNEL);
+ if (!new_nqn)
+ return -ENOMEM;
+
down_write(&nvmet_config_sem);
list_for_each(entry, &nvmet_subsystems_group.cg_children) {
struct config_item *item =
@@ -2268,13 +2261,15 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
if (!strncmp(config_item_name(item), page, len)) {
pr_err("duplicate NQN %s\n", config_item_name(item));
up_write(&nvmet_config_sem);
+ kfree(new_nqn);
return -EINVAL;
}
}
- memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
- memcpy(nvmet_disc_subsys->subsysnqn, page, len);
+ old_nqn = nvmet_disc_subsys->subsysnqn;
+ nvmet_disc_subsys->subsysnqn = new_nqn;
up_write(&nvmet_config_sem);
+ kfree(old_nqn);
return len;
}
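
The nvmet configfs change turns the in-place memcpy of the discovery NQN into an allocate-then-publish sequence: the replacement string is built with kstrndup() before the semaphore is taken, the pointer is swapped under nvmet_config_sem, and the old buffer is freed afterwards, so the subsysnqn pointer is never observed half-written and no allocation happens under the lock. The same pointer-swap pattern in isolation, assuming readers only dereference the string while holding the semaphore (names are illustrative):

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/errno.h>

static int update_name(struct rw_semaphore *lock, char **slot,
		       const char *page, size_t len)
{
	char *new_name, *old_name;

	new_name = kstrndup(page, len, GFP_KERNEL);	/* allocate outside the lock */
	if (!new_name)
		return -ENOMEM;

	down_write(lock);
	old_name = *slot;
	*slot = new_name;	/* readers see either the old or the new string, never a mix */
	up_write(lock);

	kfree(old_name);	/* safe once no one under the lock can still reference it */
	return 0;
}
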
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 1f4e9989663b..fde6c555af61 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -127,7 +127,7 @@ static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
unsigned long idx;
u32 nsid = 0;
- xa_for_each(&subsys->namespaces, idx, cur)
+ nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur)
nsid = cur->nsid;
return nsid;
@@ -441,11 +441,14 @@ u16 nvmet_req_find_ns(struct nvmet_req *req)
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
req->ns = xa_load(&subsys->namespaces, nsid);
- if (unlikely(!req->ns)) {
+ if (unlikely(!req->ns || !req->ns->enabled)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
- if (nvmet_subsys_nsid_exists(subsys, nsid))
- return NVME_SC_INTERNAL_PATH_ERROR;
- return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
+ if (!req->ns) /* ns doesn't exist! */
+ return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
+
+ /* ns exists but it's disabled */
+ req->ns = NULL;
+ return NVME_SC_INTERNAL_PATH_ERROR;
}
percpu_ref_get(&req->ns->ref);
@@ -583,8 +586,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
goto out_unlock;
ret = -EMFILE;
- if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
- goto out_unlock;
ret = nvmet_bdev_ns_enable(ns);
if (ret == -ENOTBLK)
@@ -599,38 +600,19 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
- ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
- 0, GFP_KERNEL);
- if (ret)
- goto out_dev_put;
-
- if (ns->nsid > subsys->max_nsid)
- subsys->max_nsid = ns->nsid;
-
- ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
- if (ret)
- goto out_restore_subsys_maxnsid;
-
if (ns->pr.enable) {
ret = nvmet_pr_init_ns(ns);
if (ret)
- goto out_remove_from_subsys;
+ goto out_dev_put;
}
- subsys->nr_namespaces++;
-
nvmet_ns_changed(subsys, ns->nsid);
ns->enabled = true;
+ xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
ret = 0;
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
-
-out_remove_from_subsys:
- xa_erase(&subsys->namespaces, ns->nsid);
-out_restore_subsys_maxnsid:
- subsys->max_nsid = nvmet_max_nsid(subsys);
- percpu_ref_exit(&ns->ref);
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -649,15 +631,37 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
goto out_unlock;
ns->enabled = false;
- xa_erase(&ns->subsys->namespaces, ns->nsid);
- if (ns->nsid == subsys->max_nsid)
- subsys->max_nsid = nvmet_max_nsid(subsys);
+ xa_clear_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
mutex_unlock(&subsys->lock);
+ if (ns->pr.enable)
+ nvmet_pr_exit_ns(ns);
+
+ mutex_lock(&subsys->lock);
+ nvmet_ns_changed(subsys, ns->nsid);
+ nvmet_ns_dev_disable(ns);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+}
+
+void nvmet_ns_free(struct nvmet_ns *ns)
+{
+ struct nvmet_subsys *subsys = ns->subsys;
+
+ nvmet_ns_disable(ns);
+
+ mutex_lock(&subsys->lock);
+
+ xa_erase(&subsys->namespaces, ns->nsid);
+ if (ns->nsid == subsys->max_nsid)
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+
+ mutex_unlock(&subsys->lock);
+
/*
* Now that we removed the namespaces from the lookup list, we
* can kill the per_cpu ref and wait for any remaining references
@@ -671,21 +675,9 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
wait_for_completion(&ns->disable_done);
percpu_ref_exit(&ns->ref);
- if (ns->pr.enable)
- nvmet_pr_exit_ns(ns);
-
mutex_lock(&subsys->lock);
-
subsys->nr_namespaces--;
- nvmet_ns_changed(subsys, ns->nsid);
- nvmet_ns_dev_disable(ns);
-out_unlock:
mutex_unlock(&subsys->lock);
-}
-
-void nvmet_ns_free(struct nvmet_ns *ns)
-{
- nvmet_ns_disable(ns);
down_write(&nvmet_ana_sem);
nvmet_ana_group_enabled[ns->anagrpid]--;
@@ -699,15 +691,33 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
struct nvmet_ns *ns;
+ mutex_lock(&subsys->lock);
+
+ if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+ goto out_unlock;
+
ns = kzalloc(sizeof(*ns), GFP_KERNEL);
if (!ns)
- return NULL;
+ goto out_unlock;
init_completion(&ns->disable_done);
ns->nsid = nsid;
ns->subsys = subsys;
+ if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
+ goto out_free;
+
+ if (ns->nsid > subsys->max_nsid)
+ subsys->max_nsid = nsid;
+
+ if (xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL))
+ goto out_exit;
+
+ subsys->nr_namespaces++;
+
+ mutex_unlock(&subsys->lock);
+
down_write(&nvmet_ana_sem);
ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
nvmet_ana_group_enabled[ns->anagrpid]++;
@@ -718,6 +728,14 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->csi = NVME_CSI_NVM;
return ns;
+out_exit:
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+ percpu_ref_exit(&ns->ref);
+out_free:
+ kfree(ns);
+out_unlock:
+ mutex_unlock(&subsys->lock);
+ return NULL;
}
static void nvmet_update_sq_head(struct nvmet_req *req)
@@ -1394,7 +1412,7 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
ctrl->p2p_client = get_device(req->p2p_client);
- xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns)
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 0bda83d0fc3e..eaf31c823cbe 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
*/
id->nsfeat |= 1 << 4;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
- id->npwg = lpp0b;
+ id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 58328b35dc96..7233549f7c8a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -24,6 +24,7 @@
#define NVMET_DEFAULT_VS NVME_VS(2, 1, 0)
+#define NVMET_NS_ENABLED XA_MARK_1
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
#define NVMET_NO_ERROR_LOC ((u16)-1)
@@ -33,6 +34,12 @@
#define NVMET_FR_MAX_SIZE 8
#define NVMET_PR_LOG_QUEUE_SIZE 64
+#define nvmet_for_each_ns(xa, index, entry) \
+ xa_for_each(xa, index, entry)
+
+#define nvmet_for_each_enabled_ns(xa, index, entry) \
+ xa_for_each_marked(xa, index, entry, NVMET_NS_ENABLED)
+
/*
* Supported optional AENs:
*/
diff --git a/drivers/nvme/target/pr.c b/drivers/nvme/target/pr.c
index 90e9f5bbe581..cd22d8333314 100644
--- a/drivers/nvme/target/pr.c
+++ b/drivers/nvme/target/pr.c
@@ -60,7 +60,7 @@ u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask)
goto success;
}
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->pr.enable)
WRITE_ONCE(ns->pr.notify_mask, mask);
}
@@ -1056,7 +1056,7 @@ int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
* nvmet_pr_init_ns(), see more details in nvmet_ns_enable().
* So just check ns->pr.enable.
*/
- xa_for_each(&subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
if (ns->pr.enable) {
ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
&ctrl->hostid);
@@ -1067,7 +1067,7 @@ int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
return 0;
free_per_ctrl_refs:
- xa_for_each(&subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
if (ns->pr.enable) {
pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
if (pc_ref)
@@ -1087,7 +1087,7 @@ void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl)
kfifo_free(&ctrl->pr_log_mgr.log_queue);
mutex_destroy(&ctrl->pr_log_mgr.lock);
- xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
+ nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
if (ns->pr.enable) {
pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
if (pc_ref)
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c5b925ac469f..c1f1c810e810 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -459,7 +459,8 @@ static int of_translate_one(const struct device_node *parent, const struct of_bu
}
if (ranges == NULL || rlen == 0) {
offset = of_read_number(addr, na);
- memset(addr, 0, pna * 4);
+ /* set address to zero, pass flags through */
+ memset(addr + pbus->flag_cells, 0, (pna - pbus->flag_cells) * 4);
pr_debug("empty ranges; 1:1 translation\n");
goto finish;
}
@@ -619,7 +620,7 @@ struct device_node *__of_get_dma_parent(const struct device_node *np)
if (ret < 0)
return of_get_parent(np);
- return of_node_get(args.np);
+ return args.np;
}
#endif
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 7dc394255a0a..6f5abea2462a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -88,7 +88,8 @@ static bool __of_node_is_type(const struct device_node *np, const char *type)
}
#define EXCLUDED_DEFAULT_CELLS_PLATFORMS ( \
- IS_ENABLED(CONFIG_SPARC) \
+ IS_ENABLED(CONFIG_SPARC) || \
+ of_find_compatible_node(NULL, NULL, "coreboot") \
)
int of_bus_n_addr_cells(struct device_node *np)
@@ -1507,8 +1508,10 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
map_len--;
/* Check if not found */
- if (!new)
+ if (!new) {
+ ret = -EINVAL;
goto put;
+ }
if (!of_device_is_available(new))
match = 0;
@@ -1518,17 +1521,20 @@ int of_parse_phandle_with_args_map(const struct device_node *np,
goto put;
/* Check for malformed properties */
- if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
- goto put;
- if (map_len < new_size)
+ if (WARN_ON(new_size > MAX_PHANDLE_ARGS) ||
+ map_len < new_size) {
+ ret = -EINVAL;
goto put;
+ }
/* Move forward by new node's #<list>-cells amount */
map += new_size;
map_len -= new_size;
}
- if (!match)
+ if (!match) {
+ ret = -ENOENT;
goto put;
+ }
/* Get the <list>-map-pass-thru property (optional) */
pass = of_get_property(cur, pass_name, NULL);
diff --git a/drivers/of/empty_root.dts b/drivers/of/empty_root.dts
index cf9e97a60f48..cbe169ba3db5 100644
--- a/drivers/of/empty_root.dts
+++ b/drivers/of/empty_root.dts
@@ -2,5 +2,12 @@
/dts-v1/;
/ {
-
+ /*
+	 * #address-cells/#size-cells are required properties at the root node.
+ * Use 2 cells for both address cells and size cells in order to fully
+ * support 64-bit addresses and sizes on systems using this empty root
+ * node.
+ */
+ #address-cells = <0x02>;
+ #size-cells = <0x02>;
};
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 67fc0ceaa5f5..98b1cf78ecac 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -111,6 +111,7 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph
else
np = of_find_node_by_phandle(be32_to_cpup(imap));
imap++;
+ len--;
/* Check if not found */
if (!np) {
@@ -354,6 +355,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
return of_irq_parse_oldworld(device, index, out_irq);
/* Get the reg property (if any) */
+ addr_len = 0;
addr = of_get_property(device, "reg", &addr_len);
/* Prevent out-of-bounds read in case of longer interrupt parent address size */
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 519bf9229e61..cfc8aea002e4 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1286,7 +1286,6 @@ DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
DEFINE_SIMPLE_PROP(io_backends, "io-backends", "#io-backend-cells")
-DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
@@ -1432,7 +1431,6 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_mboxes, },
{ .parse_prop = parse_io_channels, },
{ .parse_prop = parse_io_backends, },
- { .parse_prop = parse_interrupt_parent, },
{ .parse_prop = parse_dmas, .optional = true, },
{ .parse_prop = parse_power_domains, },
{ .parse_prop = parse_hwlocks, },
diff --git a/drivers/of/unittest-data/tests-address.dtsi b/drivers/of/unittest-data/tests-address.dtsi
index 3344f15c3755..f02a181bb125 100644
--- a/drivers/of/unittest-data/tests-address.dtsi
+++ b/drivers/of/unittest-data/tests-address.dtsi
@@ -114,6 +114,7 @@
device_type = "pci";
ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x7f00000>,
<0x81000000 0 0x00000000 0 0xefff0000 0 0x0010000>;
+ dma-ranges = <0x43000000 0x10 0x00 0x00 0x00 0x00 0x10000000>;
reg = <0x00000000 0xd1070000 0x20000>;
pci@0,0 {
@@ -142,6 +143,7 @@
#size-cells = <0x01>;
ranges = <0xa0000000 0 0 0 0x2000000>,
<0xb0000000 1 0 0 0x1000000>;
+ dma-ranges = <0xc0000000 0x43000000 0x10 0x00 0x10000000>;
dev@e0000000 {
reg = <0xa0001000 0x1000>,
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 42b411f32b1b..438fd70fa995 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1213,6 +1213,44 @@ static void __init of_unittest_pci_dma_ranges(void)
of_node_put(np);
}
+static void __init of_unittest_pci_empty_dma_ranges(void)
+{
+ struct device_node *np;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+
+ if (!IS_ENABLED(CONFIG_PCI))
+ return;
+
+ np = of_find_node_by_path("/testcase-data/address-tests2/pcie@d1070000/pci@0,0/dev@0,0/local-bus@0");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ if (of_pci_dma_range_parser_init(&parser, np)) {
+ pr_err("missing dma-ranges property\n");
+ return;
+ }
+
+ /*
+ * Get the dma-ranges from the device tree
+ */
+ for_each_of_pci_range(&parser, &range) {
+ unittest(range.size == 0x10000000,
+ "for_each_of_pci_range wrong size on node %pOF size=%llx\n",
+ np, range.size);
+ unittest(range.cpu_addr == 0x00000000,
+ "for_each_of_pci_range wrong CPU addr (%llx) on node %pOF",
+ range.cpu_addr, np);
+ unittest(range.pci_addr == 0xc0000000,
+ "for_each_of_pci_range wrong DMA addr (%llx) on node %pOF",
+ range.pci_addr, np);
+ }
+
+ of_node_put(np);
+}
+
static void __init of_unittest_bus_ranges(void)
{
struct device_node *np;
@@ -4272,6 +4310,7 @@ static int __init of_unittest(void)
of_unittest_dma_get_max_cpu_address();
of_unittest_parse_dma_ranges();
of_unittest_pci_dma_ranges();
+ of_unittest_pci_empty_dma_ranges();
of_unittest_bus_ranges();
of_unittest_bus_3cell_ranges();
of_unittest_reg();
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index 569125726b3e..d7ba8795d60f 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -350,8 +350,11 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
domain = dev_get_msi_domain(&pdev->dev);
- if (!domain || !irq_domain_is_hierarchy(domain))
- return mode == ALLOW_LEGACY;
+ if (!domain || !irq_domain_is_hierarchy(domain)) {
+ if (IS_ENABLED(CONFIG_PCI_MSI_ARCH_FALLBACKS))
+ return mode == ALLOW_LEGACY;
+ return false;
+ }
if (!irq_domain_is_msi_parent(domain)) {
/*
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 3a45879d85db..2f647cac4cae 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -433,6 +433,10 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (WARN_ON_ONCE(dev->msi_enabled))
return -EINVAL;
+ /* Test for the availability of MSI support */
+ if (!pci_msi_domain_supports(dev, 0, ALLOW_LEGACY))
+ return -ENOTSUPP;
+
nvec = pci_msi_vec_count(dev);
if (nvec < 0)
return nvec;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0b29ec6e8e5e..661f98c6c63a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6232,12 +6232,14 @@ u8 pcie_get_supported_speeds(struct pci_dev *dev)
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS;
+ /* Ignore speeds higher than Max Link Speed */
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0);
+
/* PCIe r3.0-compliant */
if (speeds)
return speeds;
- pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
-
/* Synthesize from the Max Link Speed field */
if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB;
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index 5e10306b6308..02e73099bad0 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -265,12 +265,14 @@ static int get_port_device_capability(struct pci_dev *dev)
(pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER)))
services |= PCIE_PORT_SERVICE_DPC;
+ /* Enable bandwidth control if more than one speed is supported. */
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
u32 linkcap;
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
- if (linkcap & PCI_EXP_LNKCAP_LBNC)
+ if (linkcap & PCI_EXP_LNKCAP_LBNC &&
+ hweight8(dev->supported_speeds) > 1)
services |= PCIE_PORT_SERVICE_BWCTRL;
}
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
index 950b7ae1d1a8..dc452610934a 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
@@ -325,6 +325,12 @@ static void usb_init_common_7216(struct brcm_usb_init_params *params)
void __iomem *ctrl = params->regs[BRCM_REGS_CTRL];
USB_CTRL_UNSET(ctrl, USB_PM, XHC_S2_CLK_SWITCH_EN);
+
+ /*
+ * The PHY might be in a bad state if it is already powered
+ * up. Toggle the power just in case.
+ */
+ USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN);
USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN);
/* 1 millisecond - for USB clocks to settle down */
diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
index 2c8038864357..d3ccf547ba1c 100644
--- a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
+++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
@@ -424,8 +424,7 @@ static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u1
* Fvco = (M * f_ref) / P,
* where f_ref is 24MHz.
*/
- tmp = (u64)_m * 24 * MHZ;
- do_div(tmp, _p);
+ tmp = div64_ul((u64)_m * 24 * MHZ, _p);
if (tmp < 750 * MHZ ||
tmp > 3000 * MHZ)
continue;
diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig
index 60e00057e8bc..ba6461350951 100644
--- a/drivers/phy/mediatek/Kconfig
+++ b/drivers/phy/mediatek/Kconfig
@@ -65,6 +65,7 @@ config PHY_MTK_HDMI
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on COMMON_CLK
depends on OF
+ depends on REGULATOR
select GENERIC_PHY
help
Support HDMI PHY for Mediatek SoCs.
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index f053b525ccff..413f76e2d174 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -145,8 +145,10 @@ static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
return phy_provider;
for_each_child_of_node(phy_provider->children, child)
- if (child == node)
+ if (child == node) {
+ of_node_put(child);
return phy_provider;
+ }
}
return ERR_PTR(-EPROBE_DEFER);
@@ -629,8 +631,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
return ERR_PTR(-ENODEV);
/* This phy type handled by the usb-phy subsystem for now */
- if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
- return ERR_PTR(-ENODEV);
+ if (of_device_is_compatible(args.np, "usb-nop-xceiv")) {
+ phy = ERR_PTR(-ENODEV);
+ goto out_put_node;
+ }
mutex_lock(&phy_provider_mutex);
phy_provider = of_phy_provider_lookup(args.np);
@@ -652,6 +656,7 @@ out_put_module:
out_unlock:
mutex_unlock(&phy_provider_mutex);
+out_put_node:
of_node_put(args.np);
return phy;
@@ -737,7 +742,7 @@ void devm_phy_put(struct device *dev, struct phy *phy)
if (!phy)
return;
- r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
+ r = devres_release(dev, devm_phy_release, devm_phy_match, phy);
dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_put);
@@ -1121,7 +1126,7 @@ void devm_phy_destroy(struct device *dev, struct phy *phy)
{
int r;
- r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy);
+ r = devres_release(dev, devm_phy_consume, devm_phy_match, phy);
dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_destroy);
@@ -1259,12 +1264,12 @@ EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
* of_phy_provider_unregister to unregister the phy provider.
*/
void devm_of_phy_provider_unregister(struct device *dev,
- struct phy_provider *phy_provider)
+ struct phy_provider *phy_provider)
{
int r;
- r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match,
- phy_provider);
+ r = devres_release(dev, devm_phy_provider_release, devm_phy_match,
+ phy_provider);
dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
}
EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index acd6075bf6d9..c9c337840715 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1052,7 +1052,7 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 0a9989e41237..2eb3329ca23f 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -309,7 +309,7 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy
priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk");
- priv->phy_rst = devm_reset_control_array_get_exclusive(dev);
+ priv->phy_rst = devm_reset_control_get(dev, "phy");
if (IS_ERR(priv->phy_rst))
return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n");
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
index ceab9c71d3b5..0965b9d4f9cf 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
@@ -1101,6 +1101,8 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(hdptx->grf),
"Could not get GRF syscon\n");
+ platform_set_drvdata(pdev, hdptx);
+
ret = devm_pm_runtime_enable(dev);
if (ret)
return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
@@ -1110,7 +1112,6 @@ static int rk_hdptx_phy_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(hdptx->phy),
"Failed to create HDMI PHY\n");
- platform_set_drvdata(pdev, hdptx);
phy_set_drvdata(hdptx->phy, hdptx);
phy_set_bus_width(hdptx->phy, 8);
diff --git a/drivers/phy/st/phy-stm32-combophy.c b/drivers/phy/st/phy-stm32-combophy.c
index 765bb34fe358..49e9fa90a681 100644
--- a/drivers/phy/st/phy-stm32-combophy.c
+++ b/drivers/phy/st/phy-stm32-combophy.c
@@ -122,6 +122,7 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
u32 max_vswing = imp_lookup[imp_size - 1].vswing[vswing_size - 1];
u32 min_vswing = imp_lookup[0].vswing[0];
u32 val;
+ u32 regval;
if (!of_property_read_u32(combophy->dev->of_node, "st,output-micro-ohms", &val)) {
if (val < min_imp || val > max_imp) {
@@ -129,16 +130,20 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
return -EINVAL;
}
- for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++)
- if (imp_lookup[imp_of].microohm <= val)
+ regval = 0;
+ for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++) {
+ if (imp_lookup[imp_of].microohm <= val) {
+ regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of);
break;
+ }
+ }
dev_dbg(combophy->dev, "Set %u micro-ohms output impedance\n",
imp_lookup[imp_of].microohm);
regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR,
STM32MP25_PCIEPRG_IMPCTRL_OHM,
- FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of));
+ regval);
} else {
regmap_read(combophy->regmap, SYSCFG_PCIEPRGCR, &val);
imp_of = FIELD_GET(STM32MP25_PCIEPRG_IMPCTRL_OHM, val);
@@ -150,16 +155,20 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
return -EINVAL;
}
- for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++)
- if (imp_lookup[imp_of].vswing[vswing_of] >= val)
+ regval = 0;
+ for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++) {
+ if (imp_lookup[imp_of].vswing[vswing_of] >= val) {
+ regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of);
break;
+ }
+ }
dev_dbg(combophy->dev, "Set %u microvolt swing\n",
imp_lookup[imp_of].vswing[vswing_of]);
regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR,
STM32MP25_PCIEPRG_IMPCTRL_VSWING,
- FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of));
+ regval);
}
return 0;
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index d66c3a3e8429..b96e6368a956 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -86,6 +86,7 @@ const struct regmap_config mcp23x08_regmap = {
.num_reg_defaults = ARRAY_SIZE(mcp23x08_defaults),
.cache_type = REGCACHE_FLAT,
.max_register = MCP_OLAT,
+ .disable_locking = true, /* mcp->lock protects the regmap */
};
EXPORT_SYMBOL_GPL(mcp23x08_regmap);
@@ -132,6 +133,7 @@ const struct regmap_config mcp23x17_regmap = {
.num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
.cache_type = REGCACHE_FLAT,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .disable_locking = true, /* mcp->lock protects the regmap */
};
EXPORT_SYMBOL_GPL(mcp23x17_regmap);
@@ -228,7 +230,9 @@ static int mcp_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
+ mutex_lock(&mcp->lock);
ret = mcp_read(mcp, MCP_GPPU, &data);
+ mutex_unlock(&mcp->lock);
if (ret < 0)
return ret;
status = (data & BIT(pin)) ? 1 : 0;
@@ -257,7 +261,9 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
+ mutex_lock(&mcp->lock);
ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
+ mutex_unlock(&mcp->lock);
break;
default:
dev_dbg(mcp->dev, "Invalid config param %04x\n", param);
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 924bf4d3cc77..8470b7f2b135 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -707,7 +707,7 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
/* Framework Laptop (12th Gen Intel Core) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "12th Gen Intel Core"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Laptop (12th Gen Intel Core)"),
},
.driver_data = (void *)&framework_laptop_mec_lpc_driver_data,
},
@@ -715,7 +715,7 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
/* Framework Laptop (13th Gen Intel Core) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "13th Gen Intel Core"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Laptop (13th Gen Intel Core)"),
},
.driver_data = (void *)&framework_laptop_mec_lpc_driver_data,
},
diff --git a/drivers/platform/loongarch/Kconfig b/drivers/platform/loongarch/Kconfig
index 5633e4d73991..447528797d07 100644
--- a/drivers/platform/loongarch/Kconfig
+++ b/drivers/platform/loongarch/Kconfig
@@ -18,7 +18,7 @@ if LOONGARCH_PLATFORM_DEVICES
config LOONGSON_LAPTOP
tristate "Generic Loongson-3 Laptop Driver"
- depends on ACPI
+ depends on ACPI_EC
depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
depends on MACH_LOONGSON64
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index 77465ed9b449..341d01d3e3e4 100644
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -190,7 +190,7 @@ static struct quirk_entry quirk_asm201 = {
};
static struct quirk_entry quirk_g_series = {
- .num_zones = 2,
+ .num_zones = 0,
.hdmi_mux = 0,
.amplifier = 0,
.deepslp = 0,
@@ -199,7 +199,7 @@ static struct quirk_entry quirk_g_series = {
};
static struct quirk_entry quirk_x_series = {
- .num_zones = 2,
+ .num_zones = 0,
.hdmi_mux = 0,
.amplifier = 0,
.deepslp = 0,
@@ -243,6 +243,15 @@ static const struct dmi_system_id alienware_quirks[] __initconst = {
},
{
.callback = dmi_matched,
+ .ident = "Alienware m16 R1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"),
+ },
+ .driver_data = &quirk_x_series,
+ },
+ {
+ .callback = dmi_matched,
.ident = "Alienware m17 R5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
@@ -687,6 +696,9 @@ static void alienware_zone_exit(struct platform_device *dev)
{
u8 zone;
+ if (!quirks->num_zones)
+ return;
+
sysfs_remove_group(&dev->dev.kobj, &zone_attribute_group);
led_classdev_unregister(&global_led);
if (zone_dev_attrs) {
@@ -1229,9 +1241,11 @@ static int __init alienware_wmi_init(void)
goto fail_prep_thermal_profile;
}
- ret = alienware_zone_init(platform_device);
- if (ret)
- goto fail_prep_zones;
+ if (quirks->num_zones > 0) {
+ ret = alienware_zone_init(platform_device);
+ if (ret)
+ goto fail_prep_zones;
+ }
return 0;
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 81ccc96ffe40..20c55bab3b8c 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -64,7 +64,7 @@ static const char * const omen_thermal_profile_boards[] = {
"874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
"88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
"88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
- "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42"
+ "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42", "8A15"
};
/* DMI Board names of Omen laptops that are specifically set to be thermal
@@ -80,7 +80,7 @@ static const char * const omen_thermal_profile_force_v0_boards[] = {
* "balanced" when reaching zero.
*/
static const char * const omen_timed_thermal_profile_boards[] = {
- "8BAD", "8A42"
+ "8BAD", "8A42", "8A15"
};
/* DMI Board names of Victus laptops */
diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c
index bc252b883210..1ae50702bdb7 100644
--- a/drivers/platform/x86/intel/ifs/core.c
+++ b/drivers/platform/x86/intel/ifs/core.c
@@ -20,6 +20,7 @@ static const struct x86_cpu_id ifs_cpu_ids[] __initconst = {
X86_MATCH(INTEL_GRANITERAPIDS_X, ARRAY_GEN0),
X86_MATCH(INTEL_GRANITERAPIDS_D, ARRAY_GEN0),
X86_MATCH(INTEL_ATOM_CRESTMONT_X, ARRAY_GEN1),
+ X86_MATCH(INTEL_ATOM_DARKMONT_X, ARRAY_GEN1),
{}
};
MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids);
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 4a85aad2475a..8272f1dd0fbc 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -423,6 +423,7 @@ static const struct intel_vsec_platform_info lnl_info = {
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
+#define PCI_DEVICE_ID_INTEL_VSEC_PTL 0xb07d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
@@ -432,6 +433,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_PTL, &mtl_info) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 671021cd1f59..9c7f30a47f1f 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -6237,6 +6237,7 @@ fail_pci_set_dma_mask:
fail_pci_request_regions:
pci_disable_device(pci_dev);
fail_pci_enable_device:
+ pci_dev_put(pci_dev);
return err;
}
@@ -6247,6 +6248,7 @@ mlxplat_pci_fpga_device_exit(struct pci_dev *pci_bridge,
iounmap(pci_bridge_addr);
pci_release_regions(pci_bridge);
pci_disable_device(pci_bridge);
+ pci_dev_put(pci_bridge);
}
static int
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index d51eb0db0626..cbbb0f809704 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -43,6 +43,7 @@ struct p2sb_res_cache {
};
static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
+static bool p2sb_hidden_by_bios;
static void p2sb_get_devfn(unsigned int *devfn)
{
@@ -97,6 +98,12 @@ static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
{
+ /*
+	 * The BIOS prevents the P2SB device from being enumerated by the PCI
+	 * subsystem, so we need to unhide and hide it back to look up the BAR.
+ */
+ pci_bus_write_config_dword(bus, devfn, P2SBC, 0);
+
/* Scan the P2SB device and cache its BAR0 */
p2sb_scan_and_cache_devfn(bus, devfn);
@@ -104,6 +111,8 @@ static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
if (devfn == P2SB_DEVFN_GOLDMONT)
p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
+ pci_bus_write_config_dword(bus, devfn, P2SBC, P2SBC_HIDE);
+
if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
return -ENOENT;
@@ -129,7 +138,7 @@ static int p2sb_cache_resources(void)
u32 value = P2SBC_HIDE;
struct pci_bus *bus;
u16 class;
- int ret;
+ int ret = 0;
/* Get devfn for P2SB device itself */
p2sb_get_devfn(&devfn_p2sb);
@@ -152,22 +161,53 @@ static int p2sb_cache_resources(void)
*/
pci_lock_rescan_remove();
+ pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+ p2sb_hidden_by_bios = value & P2SBC_HIDE;
+
/*
- * The BIOS prevents the P2SB device from being enumerated by the PCI
- * subsystem, so we need to unhide and hide it back to lookup the BAR.
- * Unhide the P2SB device here, if needed.
+	 * If the BIOS does not hide the P2SB device, then its resources
+	 * are accessible. Cache them only if the P2SB device is hidden.
*/
- pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
- if (value & P2SBC_HIDE)
- pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
+ if (p2sb_hidden_by_bios)
+ ret = p2sb_scan_and_cache(bus, devfn_p2sb);
- ret = p2sb_scan_and_cache(bus, devfn_p2sb);
+ pci_unlock_rescan_remove();
- /* Hide the P2SB device, if it was hidden */
- if (value & P2SBC_HIDE)
- pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE);
+ return ret;
+}
- pci_unlock_rescan_remove();
+static int p2sb_read_from_cache(struct pci_bus *bus, unsigned int devfn,
+ struct resource *mem)
+{
+ struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
+
+ if (cache->bus_dev_id != bus->dev.id)
+ return -ENODEV;
+
+ if (!p2sb_valid_resource(&cache->res))
+ return -ENOENT;
+
+ memcpy(mem, &cache->res, sizeof(*mem));
+
+ return 0;
+}
+
+static int p2sb_read_from_dev(struct pci_bus *bus, unsigned int devfn,
+ struct resource *mem)
+{
+ struct pci_dev *pdev;
+ int ret = 0;
+
+ pdev = pci_get_slot(bus, devfn);
+ if (!pdev)
+ return -ENODEV;
+
+ if (p2sb_valid_resource(pci_resource_n(pdev, 0)))
+ p2sb_read_bar0(pdev, mem);
+ else
+ ret = -ENOENT;
+
+ pci_dev_put(pdev);
return ret;
}
@@ -188,8 +228,6 @@ static int p2sb_cache_resources(void)
*/
int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
{
- struct p2sb_res_cache *cache;
-
bus = p2sb_get_bus(bus);
if (!bus)
return -ENODEV;
@@ -197,15 +235,10 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
if (!devfn)
p2sb_get_devfn(&devfn);
- cache = &p2sb_resources[PCI_FUNC(devfn)];
- if (cache->bus_dev_id != bus->dev.id)
- return -ENODEV;
+ if (p2sb_hidden_by_bios)
+ return p2sb_read_from_cache(bus, devfn, mem);
- if (!p2sb_valid_resource(&cache->res))
- return -ENOENT;
-
- memcpy(mem, &cache->res, sizeof(*mem));
- return 0;
+ return p2sb_read_from_dev(bus, devfn, mem);
}
EXPORT_SYMBOL_GPL(p2sb_bar);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 6371a9f765c1..2cfb2ac3f465 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -184,7 +184,8 @@ enum tpacpi_hkey_event_t {
*/
TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
TP_HKEY_EV_DOUBLETAP_TOGGLE = 0x131c, /* Toggle trackpoint doubletap on/off */
- TP_HKEY_EV_PROFILE_TOGGLE = 0x131f, /* Toggle platform profile */
+ TP_HKEY_EV_PROFILE_TOGGLE = 0x131f, /* Toggle platform profile in 2024 systems */
+ TP_HKEY_EV_PROFILE_TOGGLE2 = 0x1401, /* Toggle platform profile in 2025 + systems */
/* Reasons for waking up from S3/S4 */
TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
@@ -11200,6 +11201,7 @@ static bool tpacpi_driver_event(const unsigned int hkey_event)
tp_features.trackpoint_doubletap = !tp_features.trackpoint_doubletap;
return true;
case TP_HKEY_EV_PROFILE_TOGGLE:
+ case TP_HKEY_EV_PROFILE_TOGGLE2:
platform_profile_cycle();
return true;
}
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 0a39f68c641d..bdc19cd8d3ed 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -855,6 +855,23 @@ static const struct ts_dmi_data rwc_nanote_next_data = {
.properties = rwc_nanote_next_props,
};
+static const struct property_entry sary_tab_3_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1730),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1151),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-sary-tab-3.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data sary_tab_3_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = sary_tab_3_props,
+};
+
static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -1616,6 +1633,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* SARY Tab 3 */
+ .driver_data = (void *)&sary_tab_3_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SARY"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C210C"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "TAB3"),
+ },
+ },
+ {
/* Schneider SCT101CTM */
.driver_data = (void *)&schneider_sct101ctm_data,
.matches = {
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index bb11f467dc78..20a9efebbcb7 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -2142,6 +2142,11 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
return 0;
}
+static void genpd_provider_release(struct device *dev)
+{
+ /* nothing to be done here */
+}
+
static int genpd_alloc_data(struct generic_pm_domain *genpd)
{
struct genpd_governor_data *gd = NULL;
@@ -2173,6 +2178,7 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd)
genpd->gd = gd;
device_initialize(&genpd->dev);
+ genpd->dev.release = genpd_provider_release;
if (!genpd_is_dev_name_fw(genpd)) {
dev_set_name(&genpd->dev, "%s", genpd->name);
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index e67ecf99ef84..9bdb80fd7210 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -1458,12 +1458,12 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
.max_register = SZ_4K,
};
struct device *dev = &pdev->dev;
- struct device_node *pgc_np;
+ struct device_node *pgc_np __free(device_node) =
+ of_get_child_by_name(dev->of_node, "pgc");
struct regmap *regmap;
void __iomem *base;
int ret;
- pgc_np = of_get_child_by_name(dev->of_node, "pgc");
if (!pgc_np) {
dev_err(dev, "No power domains specified in DT\n");
return -EINVAL;
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 2b393eb5c282..c47f32f152e6 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -567,6 +567,7 @@ static int bq24190_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
static int bq24296_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
{
+ union power_supply_propval val = { .intval = bdi->charge_type };
int ret;
ret = pm_runtime_resume_and_get(bdi->dev);
@@ -587,13 +588,18 @@ static int bq24296_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable)
ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
BQ24296_REG_POC_OTG_CONFIG_MASK,
- BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+ BQ24296_REG_POC_OTG_CONFIG_SHIFT,
BQ24296_REG_POC_OTG_CONFIG_OTG);
- } else
+ } else {
ret = bq24190_write_mask(bdi, BQ24190_REG_POC,
BQ24296_REG_POC_OTG_CONFIG_MASK,
- BQ24296_REG_POC_CHG_CONFIG_SHIFT,
+ BQ24296_REG_POC_OTG_CONFIG_SHIFT,
BQ24296_REG_POC_OTG_CONFIG_DISABLE);
+ if (ret < 0)
+ goto out;
+
+ ret = bq24190_charger_set_charge_type(bdi, &val);
+ }
out:
pm_runtime_mark_last_busy(bdi->dev);
diff --git a/drivers/power/supply/cros_charge-control.c b/drivers/power/supply/cros_charge-control.c
index 17c53591ce19..9b0a7500296b 100644
--- a/drivers/power/supply/cros_charge-control.c
+++ b/drivers/power/supply/cros_charge-control.c
@@ -7,8 +7,10 @@
#include <acpi/battery.h>
#include <linux/container_of.h>
#include <linux/dmi.h>
+#include <linux/lockdep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
@@ -49,6 +51,7 @@ struct cros_chctl_priv {
struct attribute *attributes[_CROS_CHCTL_ATTR_COUNT];
struct attribute_group group;
+ struct mutex lock; /* protects fields below and cros_ec */
enum power_supply_charge_behaviour current_behaviour;
u8 current_start_threshold, current_end_threshold;
};
@@ -85,6 +88,8 @@ static int cros_chctl_configure_ec(struct cros_chctl_priv *priv)
{
struct ec_params_charge_control req = {};
+ lockdep_assert_held(&priv->lock);
+
req.cmd = EC_CHARGE_CONTROL_CMD_SET;
switch (priv->current_behaviour) {
@@ -134,11 +139,15 @@ static ssize_t cros_chctl_store_threshold(struct device *dev, struct cros_chctl_
return -EINVAL;
if (is_end_threshold) {
- if (val <= priv->current_start_threshold)
+ /* Start threshold is not exposed, use fixed value */
+ if (priv->cmd_version == 2)
+ priv->current_start_threshold = val == 100 ? 0 : val;
+
+ if (val < priv->current_start_threshold)
return -EINVAL;
priv->current_end_threshold = val;
} else {
- if (val >= priv->current_end_threshold)
+ if (val > priv->current_end_threshold)
return -EINVAL;
priv->current_start_threshold = val;
}
@@ -159,6 +168,7 @@ static ssize_t charge_control_start_threshold_show(struct device *dev,
struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
CROS_CHCTL_ATTR_START_THRESHOLD);
+ guard(mutex)(&priv->lock);
return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_start_threshold);
}
@@ -169,6 +179,7 @@ static ssize_t charge_control_start_threshold_store(struct device *dev,
struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
CROS_CHCTL_ATTR_START_THRESHOLD);
+ guard(mutex)(&priv->lock);
return cros_chctl_store_threshold(dev, priv, 0, buf, count);
}
@@ -178,6 +189,7 @@ static ssize_t charge_control_end_threshold_show(struct device *dev, struct devi
struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
CROS_CHCTL_ATTR_END_THRESHOLD);
+ guard(mutex)(&priv->lock);
return sysfs_emit(buf, "%u\n", (unsigned int)priv->current_end_threshold);
}
@@ -187,6 +199,7 @@ static ssize_t charge_control_end_threshold_store(struct device *dev, struct dev
struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
CROS_CHCTL_ATTR_END_THRESHOLD);
+ guard(mutex)(&priv->lock);
return cros_chctl_store_threshold(dev, priv, 1, buf, count);
}
@@ -195,6 +208,7 @@ static ssize_t charge_behaviour_show(struct device *dev, struct device_attribute
struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(&attr->attr,
CROS_CHCTL_ATTR_CHARGE_BEHAVIOUR);
+ guard(mutex)(&priv->lock);
return power_supply_charge_behaviour_show(dev, EC_CHARGE_CONTROL_BEHAVIOURS,
priv->current_behaviour, buf);
}
@@ -210,6 +224,7 @@ static ssize_t charge_behaviour_store(struct device *dev, struct device_attribut
if (ret < 0)
return ret;
+ guard(mutex)(&priv->lock);
priv->current_behaviour = ret;
ret = cros_chctl_configure_ec(priv);
@@ -223,12 +238,10 @@ static umode_t cros_chtl_attr_is_visible(struct kobject *kobj, struct attribute
{
struct cros_chctl_priv *priv = cros_chctl_attr_to_priv(attr, n);
- if (priv->cmd_version < 2) {
- if (n == CROS_CHCTL_ATTR_START_THRESHOLD)
- return 0;
- if (n == CROS_CHCTL_ATTR_END_THRESHOLD)
- return 0;
- }
+ if (n == CROS_CHCTL_ATTR_START_THRESHOLD && priv->cmd_version < 3)
+ return 0;
+ else if (n == CROS_CHCTL_ATTR_END_THRESHOLD && priv->cmd_version < 2)
+ return 0;
return attr->mode;
}
@@ -290,6 +303,10 @@ static int cros_chctl_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ ret = devm_mutex_init(dev, &priv->lock);
+ if (ret)
+ return ret;
+
ret = cros_ec_get_cmd_versions(cros_ec, EC_CMD_CHARGE_CONTROL);
if (ret < 0)
return ret;
@@ -327,7 +344,8 @@ static int cros_chctl_probe(struct platform_device *pdev)
priv->current_end_threshold = 100;
/* Bring EC into well-known state */
- ret = cros_chctl_configure_ec(priv);
+ scoped_guard(mutex, &priv->lock)
+ ret = cros_chctl_configure_ec(priv);
if (ret < 0)
return ret;
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 68212b39785b..6139f736ecbe 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -67,6 +67,14 @@ static int set_charge_current_limit(struct gpio_charger *gpio_charger, int val)
if (gpio_charger->current_limit_map[i].limit_ua <= val)
break;
}
+
+ /*
+	 * If a valid charge current limit isn't found, default to the smallest
+	 * current limit for safety reasons.
+ */
+ if (i >= gpio_charger->current_limit_map_size)
+ i = gpio_charger->current_limit_map_size - 1;
+
mapping = gpio_charger->current_limit_map[i];
for (i = 0; i < ndescs; i++) {
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index b889e64522c3..17e591f61efb 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -84,7 +84,7 @@ static int stm32_pwm_round_waveform_tohw(struct pwm_chip *chip,
wfhw->ccer = TIM_CCER_CCxE(ch + 1);
if (priv->have_complementary_output)
- wfhw->ccer = TIM_CCER_CCxNE(ch + 1);
+ wfhw->ccer |= TIM_CCER_CCxNE(ch + 1);
rate = clk_get_rate(priv->clk);
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 3d85762beda6..e5b4b93c07e3 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -175,7 +175,7 @@ static int of_get_regulation_constraints(struct device *dev,
if (!ret)
constraints->enable_time = pval;
- ret = of_property_read_u32(np, "regulator-uv-survival-time-ms", &pval);
+ ret = of_property_read_u32(np, "regulator-uv-less-critical-window-ms", &pval);
if (!ret)
constraints->uv_less_critical_window_ms = pval;
else
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index 69d0f2175568..70bbb459caa4 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -182,6 +182,7 @@ struct rockchip_sfc {
bool use_dma;
u32 max_iosize;
u16 version;
+ struct spi_controller *host;
};
static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
@@ -574,6 +575,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
sfc = spi_controller_get_devdata(host);
sfc->dev = dev;
+ sfc->host = host;
sfc->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sfc->regbase))
@@ -651,8 +653,8 @@ err_hclk:
static void rockchip_sfc_remove(struct platform_device *pdev)
{
- struct spi_controller *host = platform_get_drvdata(pdev);
struct rockchip_sfc *sfc = platform_get_drvdata(pdev);
+ struct spi_controller *host = sfc->host;
spi_unregister_controller(host);
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index 77ab44362f16..dcf6a70455cc 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -3,6 +3,7 @@ menuconfig FB_TFT
tristate "Support for small TFT LCD display modules"
depends on FB && SPI
depends on FB_DEVICE
+ depends on BACKLIGHT_CLASS_DEVICE
depends on GPIOLIB || COMPILE_TEST
select FB_BACKLIGHT
select FB_SYSMEM_HELPERS_DEFERRED
diff --git a/drivers/staging/gpib/common/Makefile b/drivers/staging/gpib/common/Makefile
index 0c4c77bea75b..460586edb574 100644
--- a/drivers/staging/gpib/common/Makefile
+++ b/drivers/staging/gpib/common/Makefile
@@ -1,5 +1,5 @@
-obj-m += gpib_common.o
+obj-$(CONFIG_GPIB_COMMON) += gpib_common.o
gpib_common-objs := gpib_os.o iblib.o
diff --git a/drivers/staging/gpib/nec7210/Makefile b/drivers/staging/gpib/nec7210/Makefile
index 8d4d90f21109..64330f2e89d1 100644
--- a/drivers/staging/gpib/nec7210/Makefile
+++ b/drivers/staging/gpib/nec7210/Makefile
@@ -1,4 +1,4 @@
-obj-m += nec7210.o
+obj-$(CONFIG_GPIB_NEC7210) += nec7210.o
diff --git a/drivers/thermal/thermal_thresholds.c b/drivers/thermal/thermal_thresholds.c
index d9b2a0bb44fc..38f5fd0e8930 100644
--- a/drivers/thermal/thermal_thresholds.c
+++ b/drivers/thermal/thermal_thresholds.c
@@ -69,58 +69,60 @@ static struct user_threshold *__thermal_thresholds_find(const struct list_head *
return NULL;
}
-static bool __thermal_threshold_is_crossed(struct user_threshold *threshold, int temperature,
- int last_temperature, int direction,
- int *low, int *high)
+static bool thermal_thresholds_handle_raising(struct list_head *thresholds, int temperature,
+ int last_temperature)
{
+ struct user_threshold *t;
- if (temperature >= threshold->temperature) {
- if (threshold->temperature > *low &&
- THERMAL_THRESHOLD_WAY_DOWN & threshold->direction)
- *low = threshold->temperature;
+ list_for_each_entry(t, thresholds, list_node) {
- if (last_temperature < threshold->temperature &&
- threshold->direction & direction)
- return true;
- } else {
- if (threshold->temperature < *high && THERMAL_THRESHOLD_WAY_UP
- & threshold->direction)
- *high = threshold->temperature;
+ if (!(t->direction & THERMAL_THRESHOLD_WAY_UP))
+ continue;
- if (last_temperature >= threshold->temperature &&
- threshold->direction & direction)
+ if (temperature >= t->temperature &&
+ last_temperature < t->temperature)
return true;
}
return false;
}
-static bool thermal_thresholds_handle_raising(struct list_head *thresholds, int temperature,
- int last_temperature, int *low, int *high)
+static bool thermal_thresholds_handle_dropping(struct list_head *thresholds, int temperature,
+ int last_temperature)
{
struct user_threshold *t;
- list_for_each_entry(t, thresholds, list_node) {
- if (__thermal_threshold_is_crossed(t, temperature, last_temperature,
- THERMAL_THRESHOLD_WAY_UP, low, high))
+ list_for_each_entry_reverse(t, thresholds, list_node) {
+
+ if (!(t->direction & THERMAL_THRESHOLD_WAY_DOWN))
+ continue;
+
+ if (temperature <= t->temperature &&
+ last_temperature > t->temperature)
return true;
}
return false;
}
-static bool thermal_thresholds_handle_dropping(struct list_head *thresholds, int temperature,
- int last_temperature, int *low, int *high)
+static void thermal_threshold_find_boundaries(struct list_head *thresholds, int temperature,
+ int *low, int *high)
{
struct user_threshold *t;
- list_for_each_entry_reverse(t, thresholds, list_node) {
- if (__thermal_threshold_is_crossed(t, temperature, last_temperature,
- THERMAL_THRESHOLD_WAY_DOWN, low, high))
- return true;
+ list_for_each_entry(t, thresholds, list_node) {
+ if (temperature < t->temperature &&
+ (t->direction & THERMAL_THRESHOLD_WAY_UP) &&
+ *high > t->temperature)
+ *high = t->temperature;
}
- return false;
+ list_for_each_entry_reverse(t, thresholds, list_node) {
+ if (temperature > t->temperature &&
+ (t->direction & THERMAL_THRESHOLD_WAY_DOWN) &&
+ *low < t->temperature)
+ *low = t->temperature;
+ }
}
void thermal_thresholds_handle(struct thermal_zone_device *tz, int *low, int *high)
@@ -132,6 +134,8 @@ void thermal_thresholds_handle(struct thermal_zone_device *tz, int *low, int *hi
lockdep_assert_held(&tz->lock);
+ thermal_threshold_find_boundaries(thresholds, temperature, low, high);
+
/*
* We need a second update in order to detect a threshold being crossed
*/
@@ -151,12 +155,12 @@ void thermal_thresholds_handle(struct thermal_zone_device *tz, int *low, int *hi
* - decreased : thresholds are crossed the way down
*/
if (temperature > last_temperature) {
- if (thermal_thresholds_handle_raising(thresholds, temperature,
- last_temperature, low, high))
+ if (thermal_thresholds_handle_raising(thresholds,
+ temperature, last_temperature))
thermal_notify_threshold_up(tz);
} else {
- if (thermal_thresholds_handle_dropping(thresholds, temperature,
- last_temperature, low, high))
+ if (thermal_thresholds_handle_dropping(thresholds,
+ temperature, last_temperature))
thermal_notify_threshold_down(tz);
}
}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 1257dd3ce7e6..f3a2264e012b 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1520,6 +1520,14 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 7a07c7c1a9c2..16744f25a9a0 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -92,6 +92,10 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d
#define PCI_DEVICE_ID_INTEL_LNL_NHI0 0xa833
#define PCI_DEVICE_ID_INTEL_LNL_NHI1 0xa834
+#define PCI_DEVICE_ID_INTEL_PTL_M_NHI0 0xe333
+#define PCI_DEVICE_ID_INTEL_PTL_M_NHI1 0xe334
+#define PCI_DEVICE_ID_INTEL_PTL_P_NHI0 0xe433
+#define PCI_DEVICE_ID_INTEL_PTL_P_NHI1 0xe434
#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 89d2919d0193..eeb64433ebbc 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -103,6 +103,7 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
err_nvm:
dev_dbg(&rt->dev, "NVM upgrade disabled\n");
+ rt->no_nvm_upgrade = true;
if (!IS_ERR(nvm))
tb_nvm_free(nvm);
@@ -182,8 +183,6 @@ static ssize_t nvm_authenticate_show(struct device *dev,
if (!rt->nvm)
ret = -EAGAIN;
- else if (rt->no_nvm_upgrade)
- ret = -EOPNOTSUPP;
else
ret = sysfs_emit(buf, "%#x\n", rt->auth_status);
@@ -323,8 +322,6 @@ static ssize_t nvm_version_show(struct device *dev,
if (!rt->nvm)
ret = -EAGAIN;
- else if (rt->no_nvm_upgrade)
- ret = -EOPNOTSUPP;
else
ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
@@ -342,6 +339,19 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(vendor);
+static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tb_retimer *rt = tb_to_retimer(dev);
+
+ if (attr == &dev_attr_nvm_authenticate.attr ||
+ attr == &dev_attr_nvm_version.attr)
+ return rt->no_nvm_upgrade ? 0 : attr->mode;
+
+ return attr->mode;
+}
+
static struct attribute *retimer_attrs[] = {
&dev_attr_device.attr,
&dev_attr_nvm_authenticate.attr,
@@ -351,6 +361,7 @@ static struct attribute *retimer_attrs[] = {
};
static const struct attribute_group retimer_group = {
+ .is_visible = retimer_is_visible,
.attrs = retimer_attrs,
};
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 4f777788e917..a7c6919fbf97 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -2059,6 +2059,37 @@ static void tb_exit_redrive(struct tb_port *port)
}
}
+static void tb_switch_enter_redrive(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port)
+ tb_enter_redrive(port);
+}
+
+/*
+ * Called during system and runtime suspend to forcefully exit redrive
+ * mode without querying whether the resource is available.
+ */
+static void tb_switch_exit_redrive(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (port->redrive) {
+ port->redrive = false;
+ pm_runtime_put(&sw->dev);
+ tb_port_dbg(port, "exit redrive mode\n");
+ }
+ }
+}
+
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
struct tb_port *in, *out;
@@ -2909,6 +2940,7 @@ static int tb_start(struct tb *tb, bool reset)
tb_create_usb3_tunnels(tb->root_switch);
/* Add DP IN resources for the root switch */
tb_add_dp_resources(tb->root_switch);
+ tb_switch_enter_redrive(tb->root_switch);
/* Make the discovered switches available to the userspace */
device_for_each_child(&tb->root_switch->dev, NULL,
tb_scan_finalize_switch);
@@ -2924,6 +2956,7 @@ static int tb_suspend_noirq(struct tb *tb)
tb_dbg(tb, "suspending...\n");
tb_disconnect_and_release_dp(tb);
+ tb_switch_exit_redrive(tb->root_switch);
tb_switch_suspend(tb->root_switch, false);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
tb_dbg(tb, "suspend finished\n");
@@ -3016,6 +3049,7 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
msleep(100);
}
+ tb_switch_enter_redrive(tb->root_switch);
/* Allow tb_handle_hotplug to progress events */
tcm->hotplug_active = true;
tb_dbg(tb, "resume finished\n");
@@ -3079,6 +3113,12 @@ static int tb_runtime_suspend(struct tb *tb)
struct tb_cm *tcm = tb_priv(tb);
mutex_lock(&tb->lock);
+ /*
+ * The below call only releases DP resources to allow exiting and
+ * re-entering redrive mode.
+ */
+ tb_disconnect_and_release_dp(tb);
+ tb_switch_exit_redrive(tb->root_switch);
tb_switch_suspend(tb->root_switch, true);
tcm->hotplug_active = false;
mutex_unlock(&tb->lock);
@@ -3110,6 +3150,7 @@ static int tb_runtime_resume(struct tb *tb)
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
tb_tunnel_restart(tunnel);
+ tb_switch_enter_redrive(tb->root_switch);
tcm->hotplug_active = true;
mutex_unlock(&tb->lock);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 15db90c54a45..92703efda1f7 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -436,7 +436,7 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
goto free_segments;
}
- xhci_link_rings(xhci, ring, &new_ring);
+ xhci_link_rings(xhci, &new_ring, ring);
trace_xhci_ring_expansion(ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeed, now has %d segments",
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4cf5363875c7..09b05a62375e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1199,8 +1199,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
* Keep retrying until the EP starts and stops again, on
* chips where this is known to help. Wait for 100ms.
*/
- if (!(xhci->quirks & XHCI_NEC_HOST))
- break;
if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
break;
fallthrough;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9ba5584061c8..64317b390d22 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -625,6 +625,8 @@ static void option_instat_callback(struct urb *urb);
#define MEIGSMART_PRODUCT_SRM825L 0x4d22
/* MeiG Smart SLM320 based on UNISOC UIS8910 */
#define MEIGSMART_PRODUCT_SLM320 0x4d41
+/* MeiG Smart SLM770A based on ASR1803 */
+#define MEIGSMART_PRODUCT_SLM770A 0x4d57
/* Device flags */
@@ -1395,6 +1397,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c0, 0xff), /* Telit FE910C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c4, 0xff), /* Telit FE910C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2247,6 +2255,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00),
.driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7129, 0xff, 0x00, 0x00), /* MediaTek T7XX */
+ .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
.driver_info = RSVD(1) | RSVD(4) },
@@ -2375,6 +2385,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Global EDU */
{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010a, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for WWAN Ready */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010a, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010a, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010b, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for WWAN Ready */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010b, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010b, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010c, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for WWAN Ready */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010c, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010c, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010d, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for WWAN Ready */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010d, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010d, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
@@ -2382,9 +2404,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */
+ .driver_info = NCTRL(1) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff), /* TCL IK512 ECM */
+ .driver_info = NCTRL(3) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index de035071fedb..55c6686f091e 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -649,6 +649,7 @@ config FB_S1D13XXX
config FB_ATMEL
tristate "AT91 LCD Controller support"
depends on FB && OF && HAVE_CLK && HAS_IOMEM
+ depends on BACKLIGHT_CLASS_DEVICE
depends on HAVE_FB_ATMEL || COMPILE_TEST
select FB_BACKLIGHT
select FB_IOMEM_HELPERS
@@ -660,7 +661,6 @@ config FB_ATMEL
config FB_NVIDIA
tristate "nVidia Framebuffer Support"
depends on FB && PCI
- select FB_BACKLIGHT if FB_NVIDIA_BACKLIGHT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -700,6 +700,8 @@ config FB_NVIDIA_DEBUG
config FB_NVIDIA_BACKLIGHT
bool "Support for backlight control"
depends on FB_NVIDIA
+ depends on BACKLIGHT_CLASS_DEVICE=y || BACKLIGHT_CLASS_DEVICE=FB_NVIDIA
+ select FB_BACKLIGHT
default y
help
Say Y here if you want to control the backlight of your display.
@@ -707,7 +709,6 @@ config FB_NVIDIA_BACKLIGHT
config FB_RIVA
tristate "nVidia Riva support"
depends on FB && PCI
- select FB_BACKLIGHT if FB_RIVA_BACKLIGHT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -747,6 +748,8 @@ config FB_RIVA_DEBUG
config FB_RIVA_BACKLIGHT
bool "Support for backlight control"
depends on FB_RIVA
+ depends on BACKLIGHT_CLASS_DEVICE=y || BACKLIGHT_CLASS_DEVICE=FB_RIVA
+ select FB_BACKLIGHT
default y
help
Say Y here if you want to control the backlight of your display.
@@ -934,7 +937,6 @@ config FB_MATROX_MAVEN
config FB_RADEON
tristate "ATI Radeon display support"
depends on FB && PCI
- select FB_BACKLIGHT if FB_RADEON_BACKLIGHT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -960,6 +962,8 @@ config FB_RADEON_I2C
config FB_RADEON_BACKLIGHT
bool "Support for backlight control"
depends on FB_RADEON
+ depends on BACKLIGHT_CLASS_DEVICE=y || BACKLIGHT_CLASS_DEVICE=FB_RADEON
+ select FB_BACKLIGHT
default y
help
Say Y here if you want to control the backlight of your display.
@@ -975,7 +979,6 @@ config FB_RADEON_DEBUG
config FB_ATY128
tristate "ATI Rage128 display support"
depends on FB && PCI
- select FB_BACKLIGHT if FB_ATY128_BACKLIGHT
select FB_IOMEM_HELPERS
select FB_MACMODES if PPC_PMAC
help
@@ -989,6 +992,8 @@ config FB_ATY128
config FB_ATY128_BACKLIGHT
bool "Support for backlight control"
depends on FB_ATY128
+ depends on BACKLIGHT_CLASS_DEVICE=y || BACKLIGHT_CLASS_DEVICE=FB_ATY128
+ select FB_BACKLIGHT
default y
help
Say Y here if you want to control the backlight of your display.
@@ -999,7 +1004,6 @@ config FB_ATY
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
- select FB_BACKLIGHT if FB_ATY_BACKLIGHT
select FB_IOMEM_FOPS
select FB_MACMODES if PPC
select FB_ATY_CT if SPARC64 && PCI
@@ -1040,6 +1044,8 @@ config FB_ATY_GX
config FB_ATY_BACKLIGHT
bool "Support for backlight control"
depends on FB_ATY
+ depends on BACKLIGHT_CLASS_DEVICE=y || BACKLIGHT_CLASS_DEVICE=FB_ATY
+ select FB_BACKLIGHT
default y
help
Say Y here if you want to control the backlight of your display.
@@ -1528,6 +1534,7 @@ config FB_SH_MOBILE_LCDC
depends on FB && HAVE_CLK && HAS_IOMEM
depends on SUPERH || COMPILE_TEST
depends on FB_DEVICE
+ depends on BACKLIGHT_CLASS_DEVICE
select FB_BACKLIGHT
select FB_DEFERRED_IO
select FB_DMAMEM_HELPERS
@@ -1793,6 +1800,7 @@ config FB_SSD1307
tristate "Solomon SSD1307 framebuffer support"
depends on FB && I2C
depends on GPIOLIB || COMPILE_TEST
+ depends on BACKLIGHT_CLASS_DEVICE
select FB_BACKLIGHT
select FB_SYSMEM_HELPERS_DEFERRED
help
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index 0ab8848ba2f1..d554d8c543d4 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -183,9 +183,8 @@ config FB_SYSMEM_HELPERS_DEFERRED
select FB_SYSMEM_HELPERS
config FB_BACKLIGHT
- tristate
+ bool
depends on FB
- select BACKLIGHT_CLASS_DEVICE
config FB_MODE_HELPERS
bool "Enable Video Mode Handling Helpers"
diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c
index d7db6c824e13..224e7dde9cde 100644
--- a/drivers/virt/coco/tdx-guest/tdx-guest.c
+++ b/drivers/virt/coco/tdx-guest/tdx-guest.c
@@ -124,10 +124,8 @@ static void *alloc_quote_buf(void)
if (!addr)
return NULL;
- if (set_memory_decrypted((unsigned long)addr, count)) {
- free_pages_exact(addr, len);
+ if (set_memory_decrypted((unsigned long)addr, count))
return NULL;
- }
return addr;
}
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index d700e0d49bb9..8ad06b54c5ad 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -286,7 +286,7 @@ static int stm32_iwdg_irq_init(struct platform_device *pdev,
if (!wdt->data->has_early_wakeup)
return 0;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq <= 0)
return 0;
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index af3db0a7ae4d..7ea6f0b43b95 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -358,7 +358,7 @@ static void btrfs_simple_end_io(struct bio *bio)
INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
} else {
- if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
+ if (bio_is_zone_append(bio) && !bio->bi_status)
btrfs_record_physical_zoned(bbio);
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
}
@@ -401,7 +401,7 @@ static void btrfs_orig_write_end_io(struct bio *bio)
else
bio->bi_status = BLK_STS_OK;
- if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
+ if (bio_is_zone_append(bio) && !bio->bi_status)
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
btrfs_bio_end_io(bbio, bbio->bio.bi_status);
@@ -415,7 +415,7 @@ static void btrfs_clone_write_end_io(struct bio *bio)
if (bio->bi_status) {
atomic_inc(&stripe->bioc->error);
btrfs_log_dev_io_error(bio, stripe->dev);
- } else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ } else if (bio_is_zone_append(bio)) {
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
}
@@ -652,8 +652,14 @@ static u64 btrfs_append_map_length(struct btrfs_bio *bbio, u64 map_length)
map_length = min(map_length, bbio->fs_info->max_zone_append_size);
sector_offset = bio_split_rw_at(&bbio->bio, &bbio->fs_info->limits,
&nr_segs, map_length);
- if (sector_offset)
- return sector_offset << SECTOR_SHIFT;
+ if (sector_offset) {
+ /*
+ * bio_split_rw_at() could split at a size smaller than our
+ * sectorsize and thus cause unaligned I/Os. Fix that by
+ * always rounding down to the nearest boundary.
+ */
+ return ALIGN_DOWN(sector_offset << SECTOR_SHIFT, bbio->fs_info->sectorsize);
+ }
return map_length;
}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 693dc27ffb89..185985a337b3 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -654,6 +654,8 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
goto error_unlock_cow;
}
}
+
+ trace_btrfs_cow_block(root, buf, cow);
if (unlock_orig)
btrfs_tree_unlock(buf);
free_extent_buffer_stale(buf);
@@ -710,7 +712,6 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 search_start;
- int ret;
if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
btrfs_abort_transaction(trans, -EUCLEAN);
@@ -751,12 +752,8 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
* Also We don't care about the error, as it's handled internally.
*/
btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
- ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
- cow_ret, search_start, 0, nest);
-
- trace_btrfs_cow_block(root, buf, *cow_ret);
-
- return ret;
+ return btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
+ cow_ret, search_start, 0, nest);
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 307dedf95c70..2c341956a01c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -371,6 +371,25 @@ static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transi
}
/*
+ * Return the generation this root started with.
+ *
+ * Every normal root is created with root->root_key.offset set to its
+ * originating generation. If it is a snapshot, it is the generation when the
+ * snapshot was created.
+ *
+ * However, for TREE_RELOC roots, root_key.offset is the objectid of the owning
+ * tree root. Thankfully we copy the root item of the owning tree root, which
+ * has its last_snapshot set to what we would have root_key.offset set to, so
+ * return that if this is a TREE_RELOC root.
+ */
+static inline u64 btrfs_root_origin_generation(const struct btrfs_root *root)
+{
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
+ return btrfs_root_last_snapshot(&root->root_item);
+ return root->root_key.offset;
+}
+
+/*
* Structure that conveys information about an extent that is going to replace
* all the extents in a file range.
*/
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index bd09dd3ad1a0..3c6f7fecbb9a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5285,7 +5285,7 @@ static bool visit_node_for_delete(struct btrfs_root *root, struct walk_control *
* reference to it.
*/
generation = btrfs_node_ptr_generation(eb, slot);
- if (!wc->update_ref || generation <= root->root_key.offset)
+ if (!wc->update_ref || generation <= btrfs_root_origin_generation(root))
return false;
/*
@@ -5340,7 +5340,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
goto reada;
if (wc->stage == UPDATE_BACKREF &&
- generation <= root->root_key.offset)
+ generation <= btrfs_root_origin_generation(root))
continue;
/* We don't lock the tree block, it's OK to be racy here */
@@ -5683,7 +5683,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
* for the subtree
*/
if (wc->stage == UPDATE_BACKREF &&
- generation <= root->root_key.offset) {
+ generation <= btrfs_root_origin_generation(root)) {
wc->lookup_info = 1;
return 1;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 488edca8333a..27b2fe7f735d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9078,9 +9078,9 @@ out:
}
struct btrfs_encoded_read_private {
- wait_queue_head_t wait;
+ struct completion done;
void *uring_ctx;
- atomic_t pending;
+ refcount_t pending_refs;
blk_status_t status;
};
@@ -9099,14 +9099,14 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
*/
WRITE_ONCE(priv->status, bbio->bio.bi_status);
}
- if (atomic_dec_and_test(&priv->pending)) {
+ if (refcount_dec_and_test(&priv->pending_refs)) {
int err = blk_status_to_errno(READ_ONCE(priv->status));
if (priv->uring_ctx) {
btrfs_uring_read_extent_endio(priv->uring_ctx, err);
kfree(priv);
} else {
- wake_up(&priv->wait);
+ complete(&priv->done);
}
}
bio_put(&bbio->bio);
@@ -9126,8 +9126,8 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
if (!priv)
return -ENOMEM;
- init_waitqueue_head(&priv->wait);
- atomic_set(&priv->pending, 1);
+ init_completion(&priv->done);
+ refcount_set(&priv->pending_refs, 1);
priv->status = 0;
priv->uring_ctx = uring_ctx;
@@ -9140,7 +9140,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
- atomic_inc(&priv->pending);
+ refcount_inc(&priv->pending_refs);
btrfs_submit_bbio(bbio, 0);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
@@ -9155,11 +9155,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
disk_io_size -= bytes;
} while (disk_io_size);
- atomic_inc(&priv->pending);
+ refcount_inc(&priv->pending_refs);
btrfs_submit_bbio(bbio, 0);
if (uring_ctx) {
- if (atomic_dec_return(&priv->pending) == 0) {
+ if (refcount_dec_and_test(&priv->pending_refs)) {
ret = blk_status_to_errno(READ_ONCE(priv->status));
btrfs_uring_read_extent_endio(uring_ctx, ret);
kfree(priv);
@@ -9168,8 +9168,8 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
return -EIOCBQUEUED;
} else {
- if (atomic_dec_return(&priv->pending) != 0)
- io_wait_event(priv->wait, !atomic_read(&priv->pending));
+ if (!refcount_dec_and_test(&priv->pending_refs))
+ wait_for_completion_io(&priv->done);
/* See btrfs_encoded_read_endio() for ordering. */
ret = blk_status_to_errno(READ_ONCE(priv->status));
kfree(priv);
@@ -9799,15 +9799,25 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
- struct extent_map *em = NULL;
struct btrfs_chunk_map *map = NULL;
struct btrfs_device *device = NULL;
struct btrfs_swap_info bsi = {
.lowest_ppage = (sector_t)-1ULL,
};
+ struct btrfs_backref_share_check_ctx *backref_ctx = NULL;
+ struct btrfs_path *path = NULL;
int ret = 0;
u64 isize;
- u64 start;
+ u64 prev_extent_end = 0;
+
+ /*
+ * Acquire the inode's mmap lock to prevent races with memory mapped
+ * writes, as they could happen after we flush delalloc below and before
+ * we lock the extent range further below. The inode was already locked
+ * higher up in the call chain.
+ */
+ btrfs_assert_inode_locked(BTRFS_I(inode));
+ down_write(&BTRFS_I(inode)->i_mmap_lock);
/*
* If the swap file was just created, make sure delalloc is done. If the
@@ -9816,22 +9826,32 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
*/
ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
if (ret)
- return ret;
+ goto out_unlock_mmap;
/*
* The inode is locked, so these flags won't change after we check them.
*/
if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
btrfs_warn(fs_info, "swapfile must not be compressed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
btrfs_warn(fs_info, "swapfile must not be copy-on-write");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
btrfs_warn(fs_info, "swapfile must not be checksummed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
+ }
+
+ path = btrfs_alloc_path();
+ backref_ctx = btrfs_alloc_backref_share_check_ctx();
+ if (!path || !backref_ctx) {
+ ret = -ENOMEM;
+ goto out_unlock_mmap;
}
/*
@@ -9846,7 +9866,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
btrfs_warn(fs_info,
"cannot activate swapfile while exclusive operation is running");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_mmap;
}
/*
@@ -9860,7 +9881,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
btrfs_exclop_finish(fs_info);
btrfs_warn(fs_info,
"cannot activate swapfile because snapshot creation is in progress");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_unlock_mmap;
}
/*
* Snapshots can create extents which require COW even if NODATACOW is
@@ -9881,7 +9903,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
btrfs_warn(fs_info,
"cannot activate swapfile because subvolume %llu is being deleted",
btrfs_root_id(root));
- return -EPERM;
+ ret = -EPERM;
+ goto out_unlock_mmap;
}
atomic_inc(&root->nr_swapfiles);
spin_unlock(&root->root_item_lock);
@@ -9889,24 +9912,39 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
lock_extent(io_tree, 0, isize - 1, &cached_state);
- start = 0;
- while (start < isize) {
- u64 logical_block_start, physical_block_start;
+ while (prev_extent_end < isize) {
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *ei;
struct btrfs_block_group *bg;
- u64 len = isize - start;
+ u64 logical_block_start;
+ u64 physical_block_start;
+ u64 extent_gen;
+ u64 disk_bytenr;
+ u64 len;
- em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
+ key.objectid = btrfs_ino(BTRFS_I(inode));
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = prev_extent_end;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
goto out;
- }
- if (em->disk_bytenr == EXTENT_MAP_HOLE) {
+ /*
+ * If the key is not found, it means we have an implicit hole (NO_HOLES
+ * is enabled).
+ */
+ if (ret > 0) {
btrfs_warn(fs_info, "swapfile must not have holes");
ret = -EINVAL;
goto out;
}
- if (em->disk_bytenr == EXTENT_MAP_INLINE) {
+
+ leaf = path->nodes[0];
+ ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
/*
* It's unlikely we'll ever actually find ourselves
* here, as a file small enough to fit inline won't be
@@ -9918,23 +9956,45 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
ret = -EINVAL;
goto out;
}
- if (extent_map_is_compressed(em)) {
+
+ if (btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
btrfs_warn(fs_info, "swapfile must not be compressed");
ret = -EINVAL;
goto out;
}
- logical_block_start = extent_map_block_start(em) + (start - em->start);
- len = min(len, em->len - (start - em->start));
- free_extent_map(em);
- em = NULL;
+ disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
+ if (disk_bytenr == 0) {
+ btrfs_warn(fs_info, "swapfile must not have holes");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ logical_block_start = disk_bytenr + btrfs_file_extent_offset(leaf, ei);
+ extent_gen = btrfs_file_extent_generation(leaf, ei);
+ prev_extent_end = btrfs_file_extent_end(path);
- ret = can_nocow_extent(inode, start, &len, NULL, false, true);
+ if (prev_extent_end > isize)
+ len = isize - key.offset;
+ else
+ len = btrfs_file_extent_num_bytes(leaf, ei);
+
+ backref_ctx->curr_leaf_bytenr = leaf->start;
+
+ /*
+ * We don't need the path anymore, so release it to avoid deadlocks
+ * when calling btrfs_is_data_extent_shared(): when joining a
+ * transaction it can block waiting for the current one's commit,
+ * which in turn may be trying to lock the same leaf to flush
+ * delayed items, for example.
+ */
+ btrfs_release_path(path);
+
+ ret = btrfs_is_data_extent_shared(BTRFS_I(inode), disk_bytenr,
+ extent_gen, backref_ctx);
if (ret < 0) {
goto out;
- } else if (ret) {
- ret = 0;
- } else {
+ } else if (ret > 0) {
btrfs_warn(fs_info,
"swapfile must not be copy-on-write");
ret = -EINVAL;
@@ -9969,7 +10029,6 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
physical_block_start = (map->stripes[0].physical +
(logical_block_start - map->start));
- len = min(len, map->chunk_len - (logical_block_start - map->start));
btrfs_free_chunk_map(map);
map = NULL;
@@ -10010,20 +10069,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
if (ret)
goto out;
}
- bsi.start = start;
+ bsi.start = key.offset;
bsi.block_start = physical_block_start;
bsi.block_len = len;
}
- start += len;
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+
+ cond_resched();
}
if (bsi.block_len)
ret = btrfs_add_swap_extent(sis, &bsi);
out:
- if (!IS_ERR_OR_NULL(em))
- free_extent_map(em);
if (!IS_ERR_OR_NULL(map))
btrfs_free_chunk_map(map);
@@ -10036,6 +10098,10 @@ out:
btrfs_exclop_finish(fs_info);
+out_unlock_mmap:
+ up_write(&BTRFS_I(inode)->i_mmap_lock);
+ btrfs_free_backref_share_ctx(backref_ctx);
+ btrfs_free_path(path);
if (ret)
return ret;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index a6f92836c9b1..f9b214992212 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1121,6 +1121,7 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
if (simple) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
+ btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
} else {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
@@ -1254,8 +1255,6 @@ out_add_root:
spin_lock(&fs_info->qgroup_lock);
fs_info->quota_root = quota_root;
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- if (simple)
- btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
spin_unlock(&fs_info->qgroup_lock);
/* Skip rescan for simple qgroups. */
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index bf267bdfa8f8..db8b42f674b7 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2902,6 +2902,7 @@ static int relocate_one_folio(struct reloc_control *rc,
const bool use_rst = btrfs_need_stripe_tree_update(fs_info, rc->block_group->flags);
ASSERT(index <= last_index);
+again:
folio = filemap_lock_folio(inode->i_mapping, index);
if (IS_ERR(folio)) {
@@ -2937,6 +2938,11 @@ static int relocate_one_folio(struct reloc_control *rc,
ret = -EIO;
goto release_folio;
}
+ if (folio->mapping != inode->i_mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
}
/*
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 7254279c3cc9..498c84323253 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5280,6 +5280,7 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
unsigned cur_len = min_t(unsigned, len,
PAGE_SIZE - pg_offset);
+again:
folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio)) {
page_cache_sync_readahead(mapping,
@@ -5312,6 +5313,11 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
ret = -EIO;
break;
}
+ if (folio->mapping != mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
+ goto again;
+ }
}
memcpy_from_folio(sctx->send_buf + sctx->send_size, folio,
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index fdcbf650ac31..7f09b6c9cc2d 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -1118,7 +1118,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->nodesize);
+ return sysfs_emit(buf, "%u\n", fs_info->nodesize);
}
BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
@@ -1128,7 +1128,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
+ return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
}
BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
@@ -1180,7 +1180,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
+ return sysfs_emit(buf, "%u\n", fs_info->sectorsize);
}
BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 148d8cefa40e..dfeee033f31f 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1527,6 +1527,11 @@ static int check_extent_item(struct extent_buffer *leaf,
dref_offset, fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid data ref count, should have non-zero value");
+ return -EUCLEAN;
+ }
inline_refs += btrfs_extent_data_ref_count(leaf, dref);
break;
/* Contains parent bytenr and ref count */
@@ -1539,6 +1544,11 @@ static int check_extent_item(struct extent_buffer *leaf,
inline_offset, fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid shared data ref count, should have non-zero value");
+ return -EUCLEAN;
+ }
inline_refs += btrfs_shared_data_ref_count(leaf, sref);
break;
case BTRFS_EXTENT_OWNER_REF_KEY:
@@ -1611,8 +1621,18 @@ static int check_simple_keyed_refs(struct extent_buffer *leaf,
{
u32 expect_item_size = 0;
- if (key->type == BTRFS_SHARED_DATA_REF_KEY)
+ if (key->type == BTRFS_SHARED_DATA_REF_KEY) {
+ struct btrfs_shared_data_ref *sref;
+
+ sref = btrfs_item_ptr(leaf, slot, struct btrfs_shared_data_ref);
+ if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid shared data backref count, should have non-zero value");
+ return -EUCLEAN;
+ }
+
expect_item_size = sizeof(struct btrfs_shared_data_ref);
+ }
if (unlikely(btrfs_item_size(leaf, slot) != expect_item_size)) {
generic_err(leaf, slot,
@@ -1689,6 +1709,11 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
offset, leaf->fs_info->sectorsize);
return -EUCLEAN;
}
+ if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref count, should have non-zero value");
+ return -EUCLEAN;
+ }
}
return 0;
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 4b8d59ebda00..851d70200c6b 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1066,7 +1066,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
if (ceph_inode_is_shutdown(inode))
return -EIO;
- if (!len)
+ if (!len || !i_size)
return 0;
/*
* flush any page cache pages in this range. this
@@ -1086,7 +1086,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
int num_pages;
size_t page_off;
bool more;
- int idx;
+ int idx = 0;
size_t left;
struct ceph_osd_req_op *op;
u64 read_off = off;
@@ -1116,6 +1116,16 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
len = read_off + read_len - off;
more = len < iov_iter_count(to);
+ op = &req->r_ops[0];
+ if (sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
num_pages = calc_pages_for(read_off, read_len);
page_off = offset_in_page(off);
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
@@ -1127,17 +1137,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
offset_in_page(read_off),
- false, false);
-
- op = &req->r_ops[0];
- if (sparse) {
- extent_cnt = __ceph_sparse_read_ext_count(inode, read_len);
- ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
- if (ret) {
- ceph_osdc_put_request(req);
- break;
- }
- }
+ false, true);
ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
@@ -1160,7 +1160,14 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
else if (ret == -ENOENT)
ret = 0;
- if (ret > 0 && IS_ENCRYPTED(inode)) {
+ if (ret < 0) {
+ ceph_osdc_put_request(req);
+ if (ret == -EBLOCKLISTED)
+ fsc->blocklisted = true;
+ break;
+ }
+
+ if (IS_ENCRYPTED(inode)) {
int fret;
fret = ceph_fscrypt_decrypt_extents(inode, pages,
@@ -1186,10 +1193,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
ret = min_t(ssize_t, fret, len);
}
- ceph_osdc_put_request(req);
-
/* Short read but not EOF? Zero out the remainder. */
- if (ret >= 0 && ret < len && (off + ret < i_size)) {
+ if (ret < len && (off + ret < i_size)) {
int zlen = min(len - ret, i_size - off - ret);
int zoff = page_off + ret;
@@ -1199,13 +1204,11 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
ret += zlen;
}
- idx = 0;
- if (ret <= 0)
- left = 0;
- else if (off + ret > i_size)
- left = i_size - off;
+ if (off + ret > i_size)
+ left = (i_size > off) ? i_size - off : 0;
else
left = ret;
+
while (left > 0) {
size_t plen, copied;
@@ -1221,13 +1224,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
break;
}
}
- ceph_release_page_vector(pages, num_pages);
- if (ret < 0) {
- if (ret == -EBLOCKLISTED)
- fsc->blocklisted = true;
- break;
- }
+ ceph_osdc_put_request(req);
if (off >= i_size || !more)
break;
@@ -1553,6 +1551,16 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
break;
}
+ op = &req->r_ops[0];
+ if (!write && sparse) {
+ extent_cnt = __ceph_sparse_read_ext_count(inode, size);
+ ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
if (len < 0) {
ceph_osdc_put_request(req);
@@ -1562,6 +1570,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (len != size)
osd_req_op_extent_update(req, 0, len);
+ osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
+
/*
* To simplify error handling, allow AIO when IO within i_size
* or IO can be satisfied by single OSD request.
@@ -1593,17 +1603,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
req->r_mtime = mtime;
}
- osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
- op = &req->r_ops[0];
- if (sparse) {
- extent_cnt = __ceph_sparse_read_ext_count(inode, size);
- ret = ceph_alloc_sparse_ext_map(op, extent_cnt);
- if (ret) {
- ceph_osdc_put_request(req);
- break;
- }
- }
-
if (aio_req) {
aio_req->total_len += len;
aio_req->num_reqs++;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 219a2cc2bf3c..785fe489ef4b 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2800,12 +2800,11 @@ retry:
if (pos < 0) {
/*
- * A rename didn't occur, but somehow we didn't end up where
- * we thought we would. Throw a warning and try again.
+ * The path is longer than PATH_MAX and this function
+ * cannot ever succeed. Creating paths that long is
+ * possible with Ceph, but Linux cannot use them.
*/
- pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
- pos);
- goto retry;
+ return ERR_PTR(-ENAMETOOLONG);
}
*pbase = base;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index de03cd6eb86e..4344e1f11806 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -431,6 +431,8 @@ static int ceph_parse_mount_param(struct fs_context *fc,
switch (token) {
case Opt_snapdirname:
+ if (strlen(param->string) > NAME_MAX)
+ return invalfc(fc, "snapdirname too long");
kfree(fsopt->snapdir_name);
fsopt->snapdir_name = param->string;
param->string = NULL;
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 1c49f8962021..0cd6b5c4df98 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -56,10 +56,10 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
buf->file = NULL;
if (erofs_is_fileio_mode(sbi)) {
- buf->file = sbi->fdev; /* some fs like FUSE needs it */
+ buf->file = sbi->dif0.file; /* some fs like FUSE needs it */
buf->mapping = buf->file->f_mapping;
} else if (erofs_is_fscache_mode(sb))
- buf->mapping = sbi->s_fscache->inode->i_mapping;
+ buf->mapping = sbi->dif0.fscache->inode->i_mapping;
else
buf->mapping = sb->s_bdev->bd_mapping;
}
@@ -179,19 +179,13 @@ out:
}
static void erofs_fill_from_devinfo(struct erofs_map_dev *map,
- struct erofs_device_info *dif)
+ struct super_block *sb, struct erofs_device_info *dif)
{
+ map->m_sb = sb;
+ map->m_dif = dif;
map->m_bdev = NULL;
- map->m_fp = NULL;
- if (dif->file) {
- if (S_ISBLK(file_inode(dif->file)->i_mode))
- map->m_bdev = file_bdev(dif->file);
- else
- map->m_fp = dif->file;
- }
- map->m_daxdev = dif->dax_dev;
- map->m_dax_part_off = dif->dax_part_off;
- map->m_fscache = dif->fscache;
+ if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode))
+ map->m_bdev = file_bdev(dif->file);
}
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
@@ -201,12 +195,8 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
erofs_off_t startoff, length;
int id;
- map->m_bdev = sb->s_bdev;
- map->m_daxdev = EROFS_SB(sb)->dax_dev;
- map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
- map->m_fscache = EROFS_SB(sb)->s_fscache;
- map->m_fp = EROFS_SB(sb)->fdev;
-
+ erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0);
+ map->m_bdev = sb->s_bdev; /* use s_bdev for the primary device */
if (map->m_deviceid) {
down_read(&devs->rwsem);
dif = idr_find(&devs->tree, map->m_deviceid - 1);
@@ -219,7 +209,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
up_read(&devs->rwsem);
return 0;
}
- erofs_fill_from_devinfo(map, dif);
+ erofs_fill_from_devinfo(map, sb, dif);
up_read(&devs->rwsem);
} else if (devs->extra_devices && !devs->flatdev) {
down_read(&devs->rwsem);
@@ -232,7 +222,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
if (map->m_pa >= startoff &&
map->m_pa < startoff + length) {
map->m_pa -= startoff;
- erofs_fill_from_devinfo(map, dif);
+ erofs_fill_from_devinfo(map, sb, dif);
break;
}
}
@@ -302,7 +292,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->offset = map.m_la;
if (flags & IOMAP_DAX)
- iomap->dax_dev = mdev.m_daxdev;
+ iomap->dax_dev = mdev.m_dif->dax_dev;
else
iomap->bdev = mdev.m_bdev;
iomap->length = map.m_llen;
@@ -331,7 +321,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->type = IOMAP_MAPPED;
iomap->addr = mdev.m_pa;
if (flags & IOMAP_DAX)
- iomap->addr += mdev.m_dax_part_off;
+ iomap->addr += mdev.m_dif->dax_part_off;
}
return 0;
}
diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c
index 3af96b1e2c2a..33f8539dda4a 100644
--- a/fs/erofs/fileio.c
+++ b/fs/erofs/fileio.c
@@ -9,6 +9,7 @@ struct erofs_fileio_rq {
struct bio_vec bvecs[BIO_MAX_VECS];
struct bio bio;
struct kiocb iocb;
+ struct super_block *sb;
};
struct erofs_fileio {
@@ -52,8 +53,9 @@ static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
rq->iocb.ki_ioprio = get_current_ioprio();
rq->iocb.ki_complete = erofs_fileio_ki_complete;
- rq->iocb.ki_flags = (rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT) ?
- IOCB_DIRECT : 0;
+ if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
+ rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
+ rq->iocb.ki_flags = IOCB_DIRECT;
iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
rq->bio.bi_iter.bi_size);
ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
@@ -67,7 +69,8 @@ static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
GFP_KERNEL | __GFP_NOFAIL);
bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ);
- rq->iocb.ki_filp = mdev->m_fp;
+ rq->iocb.ki_filp = mdev->m_dif->file;
+ rq->sb = mdev->m_sb;
return rq;
}
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index fda16eedafb5..ce3d8737df85 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -198,7 +198,7 @@ struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev)
io = kmalloc(sizeof(*io), GFP_KERNEL | __GFP_NOFAIL);
bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ);
- io->io.private = mdev->m_fscache->cookie;
+ io->io.private = mdev->m_dif->fscache->cookie;
io->io.end_io = erofs_fscache_bio_endio;
refcount_set(&io->io.ref, 1);
return &io->bio;
@@ -316,7 +316,7 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
if (!io)
return -ENOMEM;
iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count);
- ret = erofs_fscache_read_io_async(mdev.m_fscache->cookie,
+ ret = erofs_fscache_read_io_async(mdev.m_dif->fscache->cookie,
mdev.m_pa + (pos - map.m_la), io);
erofs_fscache_req_io_put(io);
@@ -657,7 +657,7 @@ int erofs_fscache_register_fs(struct super_block *sb)
if (IS_ERR(fscache))
return PTR_ERR(fscache);
- sbi->s_fscache = fscache;
+ sbi->dif0.fscache = fscache;
return 0;
}
@@ -665,14 +665,14 @@ void erofs_fscache_unregister_fs(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
- erofs_fscache_unregister_cookie(sbi->s_fscache);
+ erofs_fscache_unregister_cookie(sbi->dif0.fscache);
if (sbi->domain)
erofs_fscache_domain_put(sbi->domain);
else
fscache_relinquish_volume(sbi->volume, NULL, false);
- sbi->s_fscache = NULL;
+ sbi->dif0.fscache = NULL;
sbi->volume = NULL;
sbi->domain = NULL;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 1c847c30a918..686d835eb533 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -107,6 +107,7 @@ struct erofs_xattr_prefix_item {
};
struct erofs_sb_info {
+ struct erofs_device_info dif0;
struct erofs_mount_opts opt; /* options */
#ifdef CONFIG_EROFS_FS_ZIP
/* list for all registered superblocks, mainly for shrinker */
@@ -124,13 +125,9 @@ struct erofs_sb_info {
struct erofs_sb_lz4_info lz4;
#endif /* CONFIG_EROFS_FS_ZIP */
- struct file *fdev;
struct inode *packed_inode;
struct erofs_dev_context *devs;
- struct dax_device *dax_dev;
- u64 dax_part_off;
u64 total_blocks;
- u32 primarydevice_blocks;
u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
@@ -166,7 +163,6 @@ struct erofs_sb_info {
/* fscache support */
struct fscache_volume *volume;
- struct erofs_fscache *s_fscache;
struct erofs_domain *domain;
char *fsid;
char *domain_id;
@@ -180,6 +176,7 @@ struct erofs_sb_info {
#define EROFS_MOUNT_POSIX_ACL 0x00000020
#define EROFS_MOUNT_DAX_ALWAYS 0x00000040
#define EROFS_MOUNT_DAX_NEVER 0x00000080
+#define EROFS_MOUNT_DIRECT_IO 0x00000100
#define clear_opt(opt, option) ((opt)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(opt, option) ((opt)->mount_opt |= EROFS_MOUNT_##option)
@@ -187,7 +184,7 @@ struct erofs_sb_info {
static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi)
{
- return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->fdev;
+ return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->dif0.file;
}
static inline bool erofs_is_fscache_mode(struct super_block *sb)
@@ -357,11 +354,9 @@ enum {
};
struct erofs_map_dev {
- struct erofs_fscache *m_fscache;
+ struct super_block *m_sb;
+ struct erofs_device_info *m_dif;
struct block_device *m_bdev;
- struct dax_device *m_daxdev;
- struct file *m_fp;
- u64 m_dax_part_off;
erofs_off_t m_pa;
unsigned int m_deviceid;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index c235a8e4315e..f5956474bfde 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -203,7 +203,7 @@ static int erofs_scan_devices(struct super_block *sb,
struct erofs_device_info *dif;
int id, err = 0;
- sbi->total_blocks = sbi->primarydevice_blocks;
+ sbi->total_blocks = sbi->dif0.blocks;
if (!erofs_sb_has_device_table(sbi))
ondisk_extradevs = 0;
else
@@ -307,7 +307,7 @@ static int erofs_read_superblock(struct super_block *sb)
sbi->sb_size);
goto out;
}
- sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
+ sbi->dif0.blocks = le32_to_cpu(dsb->blocks);
sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
@@ -364,14 +364,8 @@ static void erofs_default_options(struct erofs_sb_info *sbi)
}
enum {
- Opt_user_xattr,
- Opt_acl,
- Opt_cache_strategy,
- Opt_dax,
- Opt_dax_enum,
- Opt_device,
- Opt_fsid,
- Opt_domain_id,
+ Opt_user_xattr, Opt_acl, Opt_cache_strategy, Opt_dax, Opt_dax_enum,
+ Opt_device, Opt_fsid, Opt_domain_id, Opt_directio,
Opt_err
};
@@ -398,6 +392,7 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
fsparam_string("device", Opt_device),
fsparam_string("fsid", Opt_fsid),
fsparam_string("domain_id", Opt_domain_id),
+ fsparam_flag_no("directio", Opt_directio),
{}
};
@@ -511,6 +506,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
break;
#endif
+ case Opt_directio:
+#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
+ if (result.boolean)
+ set_opt(&sbi->opt, DIRECT_IO);
+ else
+ clear_opt(&sbi->opt, DIRECT_IO);
+#else
+ errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
+#endif
+ break;
default:
return -ENOPARAM;
}
@@ -602,9 +607,8 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
return -EINVAL;
}
- sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
- &sbi->dax_part_off,
- NULL, NULL);
+ sbi->dif0.dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
+ &sbi->dif0.dax_part_off, NULL, NULL);
}
err = erofs_read_superblock(sb);
@@ -627,7 +631,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
}
if (test_opt(&sbi->opt, DAX_ALWAYS)) {
- if (!sbi->dax_dev) {
+ if (!sbi->dif0.dax_dev) {
errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
clear_opt(&sbi->opt, DAX_ALWAYS);
} else if (sbi->blkszbits != PAGE_SHIFT) {
@@ -703,16 +707,18 @@ static int erofs_fc_get_tree(struct fs_context *fc)
GET_TREE_BDEV_QUIET_LOOKUP : 0);
#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
if (ret == -ENOTBLK) {
+ struct file *file;
+
if (!fc->source)
return invalf(fc, "No source specified");
- sbi->fdev = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
- if (IS_ERR(sbi->fdev))
- return PTR_ERR(sbi->fdev);
+ file = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ sbi->dif0.file = file;
- if (S_ISREG(file_inode(sbi->fdev)->i_mode) &&
- sbi->fdev->f_mapping->a_ops->read_folio)
+ if (S_ISREG(file_inode(sbi->dif0.file)->i_mode) &&
+ sbi->dif0.file->f_mapping->a_ops->read_folio)
return get_tree_nodev(fc, erofs_fc_fill_super);
- fput(sbi->fdev);
}
#endif
return ret;
@@ -763,19 +769,24 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs)
kfree(devs);
}
-static void erofs_fc_free(struct fs_context *fc)
+static void erofs_sb_free(struct erofs_sb_info *sbi)
{
- struct erofs_sb_info *sbi = fc->s_fs_info;
-
- if (!sbi)
- return;
-
erofs_free_dev_context(sbi->devs);
kfree(sbi->fsid);
kfree(sbi->domain_id);
+ if (sbi->dif0.file)
+ fput(sbi->dif0.file);
kfree(sbi);
}
+static void erofs_fc_free(struct fs_context *fc)
+{
+ struct erofs_sb_info *sbi = fc->s_fs_info;
+
+ if (sbi) /* free here if an error occurs before transferring to sb */
+ erofs_sb_free(sbi);
+}
+
static const struct fs_context_operations erofs_context_ops = {
.parse_param = erofs_fc_parse_param,
.get_tree = erofs_fc_get_tree,
@@ -809,19 +820,14 @@ static void erofs_kill_sb(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
- if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) || sbi->fdev)
+ if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) ||
+ sbi->dif0.file)
kill_anon_super(sb);
else
kill_block_super(sb);
-
- erofs_free_dev_context(sbi->devs);
- fs_put_dax(sbi->dax_dev, NULL);
+ fs_put_dax(sbi->dif0.dax_dev, NULL);
erofs_fscache_unregister_fs(sb);
- kfree(sbi->fsid);
- kfree(sbi->domain_id);
- if (sbi->fdev)
- fput(sbi->fdev);
- kfree(sbi);
+ erofs_sb_free(sbi);
sb->s_fs_info = NULL;
}
@@ -947,6 +953,8 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",dax=always");
if (test_opt(opt, DAX_NEVER))
seq_puts(seq, ",dax=never");
+ if (erofs_is_fileio_mode(sbi) && test_opt(opt, DIRECT_IO))
+ seq_puts(seq, ",directio");
#ifdef CONFIG_EROFS_FS_ONDEMAND
if (sbi->fsid)
seq_printf(seq, ",fsid=%s", sbi->fsid);
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 01f147505487..19ef4ff2a134 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1792,9 +1792,9 @@ drain_io:
erofs_fscache_submit_bio(bio);
else
submit_bio(bio);
- if (memstall)
- psi_memstall_leave(&pflags);
}
+ if (memstall)
+ psi_memstall_leave(&pflags);
/*
* although background is preferred, no one is pending for submission.
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index 75704f58ecfa..0dd65cefce33 100644
--- a/fs/erofs/zutil.c
+++ b/fs/erofs/zutil.c
@@ -230,9 +230,10 @@ void erofs_shrinker_unregister(struct super_block *sb)
struct erofs_sb_info *const sbi = EROFS_SB(sb);
mutex_lock(&sbi->umount_mutex);
- /* clean up all remaining pclusters in memory */
- z_erofs_shrink_scan(sbi, ~0UL);
-
+ while (!xa_empty(&sbi->managed_pslots)) {
+ z_erofs_shrink_scan(sbi, ~0UL);
+ cond_resched();
+ }
spin_lock(&erofs_sb_list_lock);
list_del(&sbi->list);
spin_unlock(&erofs_sb_list_lock);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 90f883d6b8fd..fc1ae5132127 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -825,7 +825,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
error = PTR_ERR(folio);
goto out;
}
- folio_zero_user(folio, ALIGN_DOWN(addr, hpage_size));
+ folio_zero_user(folio, addr);
__folio_mark_uptodate(folio);
error = hugetlb_add_to_page_cache(folio, mapping, index);
if (unlikely(error)) {
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0d16b383a452..5f582713bf05 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1308,7 +1308,7 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
enum pnfs_iomode *iomode)
{
/* Serialise LAYOUTGET/LAYOUTRETURN */
- if (atomic_read(&lo->plh_outstanding) != 0)
+ if (atomic_read(&lo->plh_outstanding) != 0 && lo->plh_return_seq == 0)
return false;
if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
return false;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ae5c5e39afa0..aeb715b4a690 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -73,6 +73,7 @@
#include "nfs.h"
#include "netns.h"
#include "sysfs.h"
+#include "nfs4idmap.h"
#define NFSDBG_FACILITY NFSDBG_VFS
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index eacafe46e3b6..aa4712362b3b 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -40,24 +40,15 @@
#define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS)
#define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1)
-static void expkey_put_work(struct work_struct *work)
+static void expkey_put(struct kref *ref)
{
- struct svc_expkey *key =
- container_of(to_rcu_work(work), struct svc_expkey, ek_rcu_work);
+ struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
if (test_bit(CACHE_VALID, &key->h.flags) &&
!test_bit(CACHE_NEGATIVE, &key->h.flags))
path_put(&key->ek_path);
auth_domain_put(key->ek_client);
- kfree(key);
-}
-
-static void expkey_put(struct kref *ref)
-{
- struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
-
- INIT_RCU_WORK(&key->ek_rcu_work, expkey_put_work);
- queue_rcu_work(system_wq, &key->ek_rcu_work);
+ kfree_rcu(key, ek_rcu);
}
static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
@@ -364,26 +355,16 @@ static void export_stats_destroy(struct export_stats *stats)
EXP_STATS_COUNTERS_NUM);
}
-static void svc_export_put_work(struct work_struct *work)
+static void svc_export_put(struct kref *ref)
{
- struct svc_export *exp =
- container_of(to_rcu_work(work), struct svc_export, ex_rcu_work);
-
+ struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
export_stats_destroy(exp->ex_stats);
kfree(exp->ex_stats);
kfree(exp->ex_uuid);
- kfree(exp);
-}
-
-static void svc_export_put(struct kref *ref)
-{
- struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
-
- INIT_RCU_WORK(&exp->ex_rcu_work, svc_export_put_work);
- queue_rcu_work(system_wq, &exp->ex_rcu_work);
+ kfree_rcu(exp, ex_rcu);
}
static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 6f2fbaae01fa..4d92b99c1ffd 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -75,7 +75,7 @@ struct svc_export {
u32 ex_layout_types;
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
- struct rcu_work ex_rcu_work;
+ struct rcu_head ex_rcu;
unsigned long ex_xprtsec_modes;
struct export_stats *ex_stats;
};
@@ -92,7 +92,7 @@ struct svc_expkey {
u32 ek_fsid[6];
struct path ek_path;
- struct rcu_work ek_rcu_work;
+ struct rcu_head ek_rcu;
};
#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 3877b53e429f..c083e539e898 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1100,7 +1100,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
- if (!conn->cb_xprt)
+ if (!conn->cb_xprt || !ses)
return -EINVAL;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
@@ -1522,8 +1522,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
ses = c->cn_session;
}
spin_unlock(&clp->cl_lock);
- if (!c)
- return;
err = setup_callback_client(clp, &conn, ses);
if (err) {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index f8a10f90bc7a..ad44ad49274f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1347,7 +1347,6 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
{
if (!refcount_dec_and_test(&copy->refcount))
return;
- atomic_dec(&copy->cp_nn->pending_async_copies);
kfree(copy->cp_src);
kfree(copy);
}
@@ -1870,6 +1869,7 @@ do_callback:
set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
trace_nfsd_copy_async_done(copy);
nfsd4_send_cb_offload(copy);
+ atomic_dec(&copy->cp_nn->pending_async_copies);
return 0;
}
@@ -1927,19 +1927,19 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* Arbitrary cap on number of pending async copy operations */
if (atomic_inc_return(&nn->pending_async_copies) >
(int)rqstp->rq_pool->sp_nrthreads)
- goto out_err;
+ goto out_dec_async_copy_err;
async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
if (!async_copy->cp_src)
- goto out_err;
+ goto out_dec_async_copy_err;
if (!nfs4_init_copy_state(nn, copy))
- goto out_err;
+ goto out_dec_async_copy_err;
memcpy(&result->cb_stateid, &copy->cp_stateid.cs_stid,
sizeof(result->cb_stateid));
dup_copy_fields(copy, async_copy);
async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
async_copy, "%s", "copy thread");
if (IS_ERR(async_copy->copy_task))
- goto out_err;
+ goto out_dec_async_copy_err;
spin_lock(&async_copy->cp_clp->async_lock);
list_add(&async_copy->copies,
&async_copy->cp_clp->async_copies);
@@ -1954,6 +1954,9 @@ out:
trace_nfsd_copy_done(copy, status);
release_copy_files(copy);
return status;
+out_dec_async_copy_err:
+ if (async_copy)
+ atomic_dec(&nn->pending_async_copies);
out_err:
if (nfsd4_ssc_is_inter(copy)) {
/*
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 501ad7be5174..54a3fa0cf67e 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -35,6 +35,7 @@ void nilfs_init_btnc_inode(struct inode *btnc_inode)
ii->i_flags = 0;
memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
+ btnc_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
}
void nilfs_btnode_cache_clear(struct address_space *btnc)
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index ace22253fed0..2dbb15767df1 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -163,7 +163,7 @@ int nilfs_init_gcinode(struct inode *inode)
inode->i_mode = S_IFREG;
mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
- inode->i_mapping->a_ops = &empty_aops;
+ inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
ii->i_flags = 0;
nilfs_bmap_init_gc(ii->i_bmap);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index cf9ba481ae37..23f3a75edd50 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -276,6 +276,10 @@ const struct address_space_operations nilfs_aops = {
.is_partially_uptodate = block_is_partially_uptodate,
};
+const struct address_space_operations nilfs_buffer_cache_aops = {
+ .invalidate_folio = block_invalidate_folio,
+};
+
static int nilfs_insert_inode_locked(struct inode *inode,
struct nilfs_root *root,
unsigned long ino)
@@ -544,8 +548,14 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
inode = nilfs_iget_locked(sb, root, ino);
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
+
+ if (!(inode->i_state & I_NEW)) {
+ if (!inode->i_nlink) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
return inode;
+ }
err = __nilfs_read_inode(sb, root, ino, inode);
if (unlikely(err)) {
@@ -675,6 +685,7 @@ struct inode *nilfs_iget_for_shadow(struct inode *inode)
NILFS_I(s_inode)->i_flags = 0;
memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+ s_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops;
err = nilfs_attach_btree_node_cache(s_inode);
if (unlikely(err)) {
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 9b108052d9f7..1d836a5540f3 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -67,6 +67,11 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
inode = NULL;
} else {
inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
+ if (inode == ERR_PTR(-ESTALE)) {
+ nilfs_error(dir->i_sb,
+ "deleted inode referenced: %lu", ino);
+ return ERR_PTR(-EIO);
+ }
}
return d_splice_alias(inode, dentry);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 45d03826eaf1..dff241c53fc5 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -401,6 +401,7 @@ extern const struct file_operations nilfs_dir_operations;
extern const struct inode_operations nilfs_file_inode_operations;
extern const struct file_operations nilfs_file_operations;
extern const struct address_space_operations nilfs_aops;
+extern const struct address_space_operations nilfs_buffer_cache_aops;
extern const struct inode_operations nilfs_dir_inode_operations;
extern const struct inode_operations nilfs_special_inode_operations;
extern const struct inode_operations nilfs_symlink_inode_operations;
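
The nilfs2 hunks above stop leaving the btree-node, GC and shadow inode mappings on empty_aops and instead install nilfs_buffer_cache_aops, whose only callback is block_invalidate_folio(), so buffer heads attached to those metadata folios are torn down correctly on invalidation. A hedged sketch of wiring a metadata mapping up this way (the helper name is illustrative):

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>

/* Minimal aops for a buffer-cache style metadata mapping. */
static const struct address_space_operations metadata_aops = {
	.invalidate_folio = block_invalidate_folio,
};

static void init_metadata_mapping(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	inode->i_mapping->a_ops = &metadata_aops;
}
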
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 8ac42ea81a17..d1aa04a5af1b 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -971,9 +971,9 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
start = count = 0;
left = le32_to_cpu(alloc->id1.bitmap1.i_total);
- while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start)) <
- left) {
- if (bit_off == start) {
+ while (1) {
+ bit_off = ocfs2_find_next_zero_bit(bitmap, left, start);
+ if ((bit_off < left) && (bit_off == start)) {
count++;
start++;
continue;
@@ -998,29 +998,12 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
}
}
+ if (bit_off >= left)
+ break;
count = 1;
start = bit_off + 1;
}
- /* clear the contiguous bits until the end boundary */
- if (count) {
- blkno = la_start_blk +
- ocfs2_clusters_to_blocks(osb->sb,
- start - count);
-
- trace_ocfs2_sync_local_to_main_free(
- count, start - count,
- (unsigned long long)la_start_blk,
- (unsigned long long)blkno);
-
- status = ocfs2_release_clusters(handle,
- main_bm_inode,
- main_bm_bh, blkno,
- count);
- if (status < 0)
- mlog_errno(status);
- }
-
bail:
if (status)
mlog_errno(status);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 2b0daced98eb..3404e7a30c33 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -893,7 +893,7 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
int status = 0;
trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
- if (!sb_has_quota_loaded(sb, type)) {
+ if (!sb_has_quota_active(sb, type)) {
status = -ESRCH;
goto out;
}
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 73d3367c533b..2956d888c131 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -867,6 +867,7 @@ out:
brelse(oinfo->dqi_libh);
brelse(oinfo->dqi_lqi_bh);
kfree(oinfo);
+ info->dqi_priv = NULL;
return status;
}
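
The one-line quota_local.c change above resets info->dqi_priv after freeing it on the error path, so later quota teardown that inspects dqi_priv does not act on a dangling pointer. The general shape of the fix, sketched with placeholder types:

#include <linux/slab.h>

struct owner {
	void *priv;
};

static void owner_cleanup_on_error(struct owner *o)
{
	kfree(o->priv);
	o->priv = NULL;	/* leave no dangling pointer for later teardown */
}
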
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 38a5a3e9cba2..f02cd362309a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1810,7 +1810,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
}
for (; addr != end; addr += PAGE_SIZE, idx++) {
- unsigned long cur_flags = flags;
+ u64 cur_flags = flags;
pagemap_entry_t pme;
if (folio && (flags & PM_PRESENT) &&
diff --git a/fs/smb/client/Kconfig b/fs/smb/client/Kconfig
index 2aff6d1395ce..9f05f94e265a 100644
--- a/fs/smb/client/Kconfig
+++ b/fs/smb/client/Kconfig
@@ -2,7 +2,6 @@
config CIFS
tristate "SMB3 and CIFS support (advanced network filesystem)"
depends on INET
- select NETFS_SUPPORT
select NLS
select NLS_UCS2_UTILS
select CRYPTO
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 9d96b833015c..b800c9f585d8 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -398,7 +398,7 @@ cifs_alloc_inode(struct super_block *sb)
cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
if (!cifs_inode)
return NULL;
- cifs_inode->cifsAttrs = 0x20; /* default */
+ cifs_inode->cifsAttrs = ATTR_ARCHIVE; /* default */
cifs_inode->time = 0;
/*
* Until the file is open and we have gotten oplock info back from the
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 754417cb3294..d26f9bbb5382 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -614,8 +614,6 @@ int cifs_alloc_hash(const char *name, struct shash_desc **sdesc);
void cifs_free_hash(struct shash_desc **sdesc);
int cifs_try_adding_channels(struct cifs_ses *ses);
-bool is_server_using_iface(struct TCP_Server_Info *server,
- struct cifs_server_iface *iface);
bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
void cifs_ses_mark_for_reconnect(struct cifs_ses *ses);
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 2372538a1211..ddcc9e514a0e 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -987,9 +987,13 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
msleep(125);
if (cifs_rdma_enabled(server))
smbd_destroy(server);
+
if (server->ssocket) {
sock_release(server->ssocket);
server->ssocket = NULL;
+
+ /* Release netns reference for the socket. */
+ put_net(cifs_net_ns(server));
}
if (!list_empty(&server->pending_mid_q)) {
@@ -1037,6 +1041,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
*/
}
+ /* Release netns reference for this server. */
put_net(cifs_net_ns(server));
kfree(server->leaf_fullpath);
kfree(server);
@@ -1713,6 +1718,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
tcp_ses->ops = ctx->ops;
tcp_ses->vals = ctx->vals;
+
+ /* Grab netns reference for this server. */
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
@@ -1844,6 +1851,7 @@ smbd_connected:
out_err_crypto_release:
cifs_crypto_secmech_release(tcp_ses);
+ /* Release netns reference for this server. */
put_net(cifs_net_ns(tcp_ses));
out_err:
@@ -1852,8 +1860,10 @@ out_err:
cifs_put_tcp_session(tcp_ses->primary_server, false);
kfree(tcp_ses->hostname);
kfree(tcp_ses->leaf_fullpath);
- if (tcp_ses->ssocket)
+ if (tcp_ses->ssocket) {
sock_release(tcp_ses->ssocket);
+ put_net(cifs_net_ns(tcp_ses));
+ }
kfree(tcp_ses);
}
return ERR_PTR(rc);
@@ -3131,20 +3141,20 @@ generic_ip_connect(struct TCP_Server_Info *server)
socket = server->ssocket;
} else {
struct net *net = cifs_net_ns(server);
- struct sock *sk;
- rc = __sock_create(net, sfamily, SOCK_STREAM,
- IPPROTO_TCP, &server->ssocket, 1);
+ rc = sock_create_kern(net, sfamily, SOCK_STREAM, IPPROTO_TCP, &server->ssocket);
if (rc < 0) {
cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
return rc;
}
- sk = server->ssocket->sk;
- __netns_tracker_free(net, &sk->ns_tracker, false);
- sk->sk_net_refcnt = 1;
- get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ /*
+ * Grab netns reference for the socket.
+ *
+ * It'll be released here, on error, or in clean_demultiplex_info() upon server
+ * teardown.
+ */
+ get_net(net);
/* BB other socket options to set KEEPALIVE, NODELAY? */
cifs_dbg(FYI, "Socket created\n");
@@ -3158,8 +3168,10 @@ generic_ip_connect(struct TCP_Server_Info *server)
}
rc = bind_socket(server);
- if (rc < 0)
+ if (rc < 0) {
+ put_net(cifs_net_ns(server));
return rc;
+ }
/*
* Eventually check for other socket options to change from
@@ -3196,6 +3208,7 @@ generic_ip_connect(struct TCP_Server_Info *server)
if (rc < 0) {
cifs_dbg(FYI, "Error %d connecting to server\n", rc);
trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
+ put_net(cifs_net_ns(server));
sock_release(socket);
server->ssocket = NULL;
return rc;
@@ -3204,6 +3217,9 @@ generic_ip_connect(struct TCP_Server_Info *server)
if (sport == htons(RFC1001_PORT))
rc = ip_rfc1001_connect(server);
+ if (rc < 0)
+ put_net(cifs_net_ns(server));
+
return rc;
}
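
The connect.c hunks above create the server socket with sock_create_kern() and take an explicit netns reference with get_net(), dropping it with put_net() on every error path and again when the server is torn down in clean_demultiplex_info(). A hedged sketch of the pairing (the function and its parameters are illustrative, not part of the patch):

#include <linux/net.h>
#include <linux/in.h>
#include <linux/socket.h>
#include <net/net_namespace.h>

static int open_connected_socket(struct net *net, struct sockaddr *addr,
				 int addrlen, struct socket **sockp)
{
	int rc;

	rc = sock_create_kern(net, addr->sa_family, SOCK_STREAM,
			      IPPROTO_TCP, sockp);
	if (rc < 0)
		return rc;

	get_net(net);	/* held for the socket's lifetime */

	rc = kernel_connect(*sockp, addr, addrlen, 0);
	if (rc < 0) {
		put_net(net);	/* undo the reference on failure */
		sock_release(*sockp);
		*sockp = NULL;
	}
	return rc;
}
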
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index a58a3333ecc3..3b2d33291a7e 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -990,7 +990,11 @@ int cifs_open(struct inode *inode, struct file *file)
}
/* Get the cached handle as SMB2 close is deferred */
- rc = cifs_get_readable_path(tcon, full_path, &cfile);
+ if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
+ rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
+ } else {
+ rc = cifs_get_readable_path(tcon, full_path, &cfile);
+ }
if (rc == 0) {
if (file->f_flags == cfile->f_flags) {
file->private_data = cfile;
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 3306fb655136..91d4d409cb1d 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -27,31 +27,6 @@ static int
cifs_ses_add_channel(struct cifs_ses *ses,
struct cifs_server_iface *iface);
-bool
-is_server_using_iface(struct TCP_Server_Info *server,
- struct cifs_server_iface *iface)
-{
- struct sockaddr_in *i4 = (struct sockaddr_in *)&iface->sockaddr;
- struct sockaddr_in6 *i6 = (struct sockaddr_in6 *)&iface->sockaddr;
- struct sockaddr_in *s4 = (struct sockaddr_in *)&server->dstaddr;
- struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&server->dstaddr;
-
- if (server->dstaddr.ss_family != iface->sockaddr.ss_family)
- return false;
- if (server->dstaddr.ss_family == AF_INET) {
- if (s4->sin_addr.s_addr != i4->sin_addr.s_addr)
- return false;
- } else if (server->dstaddr.ss_family == AF_INET6) {
- if (memcmp(&s6->sin6_addr, &i6->sin6_addr,
- sizeof(i6->sin6_addr)) != 0)
- return false;
- } else {
- /* unknown family.. */
- return false;
- }
- return true;
-}
-
bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
{
int i;
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 010eae9d6c47..959359301250 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -4840,6 +4840,8 @@ smb2_writev_callback(struct mid_q_entry *mid)
if (written > wdata->subreq.len)
written &= 0xFFFF;
+ cifs_stats_bytes_written(tcon, written);
+
if (written < wdata->subreq.len)
wdata->result = -ENOSPC;
else
@@ -5156,6 +5158,7 @@ replay_again:
cifs_dbg(VFS, "Send error in write = %d\n", rc);
} else {
*nbytes = le32_to_cpu(rsp->DataLength);
+ cifs_stats_bytes_written(io_parms->tcon, *nbytes);
trace_smb3_write_done(0, 0, xid,
req->PersistentFileId,
io_parms->tcon->tid,
@@ -6204,7 +6207,7 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
req->StructureSize = cpu_to_le16(36);
total_len += 12;
- memcpy(req->LeaseKey, lease_key, 16);
+ memcpy(req->LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
req->LeaseState = lease_state;
flags |= CIFS_NO_RSP_BUF;
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index c14dd72e1b30..f8a40f65db6a 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -70,7 +70,6 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
atomic_set(&conn->req_running, 0);
atomic_set(&conn->r_count, 0);
atomic_set(&conn->refcnt, 1);
- atomic_set(&conn->mux_smb_requests, 0);
conn->total_credits = 1;
conn->outstanding_credits = 0;
@@ -120,8 +119,8 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
requests_queue = &conn->requests;
+ atomic_inc(&conn->req_running);
if (requests_queue) {
- atomic_inc(&conn->req_running);
spin_lock(&conn->request_lock);
list_add_tail(&work->request_entry, requests_queue);
spin_unlock(&conn->request_lock);
@@ -132,11 +131,14 @@ void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
+ atomic_dec(&conn->req_running);
+ if (waitqueue_active(&conn->req_running_q))
+ wake_up(&conn->req_running_q);
+
if (list_empty(&work->request_entry) &&
list_empty(&work->async_request_entry))
return;
- atomic_dec(&conn->req_running);
spin_lock(&conn->request_lock);
list_del_init(&work->request_entry);
spin_unlock(&conn->request_lock);
@@ -308,7 +310,7 @@ int ksmbd_conn_handler_loop(void *p)
{
struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
struct ksmbd_transport *t = conn->transport;
- unsigned int pdu_size, max_allowed_pdu_size;
+ unsigned int pdu_size, max_allowed_pdu_size, max_req;
char hdr_buf[4] = {0,};
int size;
@@ -318,6 +320,7 @@ int ksmbd_conn_handler_loop(void *p)
if (t->ops->prepare && t->ops->prepare(t))
goto out;
+ max_req = server_conf.max_inflight_req;
conn->last_active = jiffies;
set_freezable();
while (ksmbd_conn_alive(conn)) {
@@ -327,6 +330,13 @@ int ksmbd_conn_handler_loop(void *p)
kvfree(conn->request_buf);
conn->request_buf = NULL;
+recheck:
+ if (atomic_read(&conn->req_running) + 1 > max_req) {
+ wait_event_interruptible(conn->req_running_q,
+ atomic_read(&conn->req_running) < max_req);
+ goto recheck;
+ }
+
size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
if (size != sizeof(hdr_buf))
break;
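
The connection.c hunks above make req_running count every received request and let the receive loop sleep once admitting another request would exceed server_conf.max_inflight_req, with the dequeue path waking the waiters. A condensed sketch of the throttling idiom (the patch splits the increment and decrement across the enqueue/dequeue helpers; names here are illustrative):

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/errno.h>

static atomic_t inflight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(inflight_wq);

/* Receive side: block while admitting one more request would exceed the cap. */
static int throttle_admit(unsigned int max_req)
{
	while (atomic_read(&inflight) + 1 > max_req) {
		if (wait_event_interruptible(inflight_wq,
				atomic_read(&inflight) < max_req))
			return -ERESTARTSYS;
	}
	atomic_inc(&inflight);
	return 0;
}

/* Completion side: release a slot and wake a waiting receiver. */
static void throttle_release(void)
{
	atomic_dec(&inflight);
	if (waitqueue_active(&inflight_wq))
		wake_up(&inflight_wq);
}
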
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 8ddd5a3c7baf..b379ae4fdcdf 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -107,7 +107,6 @@ struct ksmbd_conn {
__le16 signing_algorithm;
bool binding;
atomic_t refcnt;
- atomic_t mux_smb_requests;
};
struct ksmbd_conn_ops {
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index 3ba95bd8edeb..601e7fcbcf1e 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -270,7 +270,6 @@ static void handle_ksmbd_work(struct work_struct *wk)
ksmbd_conn_try_dequeue_request(work);
ksmbd_free_work_struct(work);
- atomic_dec(&conn->mux_smb_requests);
/*
	 * Check the waitqueue to drop pending requests on
* disconnection. waitqueue_active is safe because it
@@ -300,11 +299,6 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
if (err)
return 0;
- if (atomic_inc_return(&conn->mux_smb_requests) >= conn->vals->max_credits) {
- atomic_dec_return(&conn->mux_smb_requests);
- return -ENOSPC;
- }
-
work = ksmbd_alloc_work_struct();
if (!work) {
pr_err("allocation for work failed\n");
@@ -367,6 +361,7 @@ static int server_conf_init(void)
server_conf.auth_mechs |= KSMBD_AUTH_KRB5 |
KSMBD_AUTH_MSKRB5;
#endif
+ server_conf.max_inflight_req = SMB2_MAX_CREDITS;
return 0;
}
diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h
index 4fc529335271..94187628ff08 100644
--- a/fs/smb/server/server.h
+++ b/fs/smb/server/server.h
@@ -42,6 +42,7 @@ struct ksmbd_server_config {
struct smb_sid domain_sid;
unsigned int auth_mechs;
unsigned int max_connections;
+ unsigned int max_inflight_req;
char *conf[SERVER_CONF_WORK_GROUP + 1];
struct task_struct *dh_task;
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 803b35b89513..23e21845f928 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1097,6 +1097,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
return rc;
}
+ ksmbd_conn_lock(conn);
smb2_buf_len = get_rfc1002_len(work->request_buf);
smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
if (smb2_neg_size > smb2_buf_len) {
@@ -1247,6 +1248,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
ksmbd_conn_set_need_negotiate(conn);
err_out:
+ ksmbd_conn_unlock(conn);
if (rc)
rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index 48cda3350e5a..befaf42b84cc 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -319,8 +319,11 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
init_smb2_max_write_size(req->smb2_max_write);
if (req->smb2_max_trans)
init_smb2_max_trans_size(req->smb2_max_trans);
- if (req->smb2_max_credits)
+ if (req->smb2_max_credits) {
init_smb2_max_credits(req->smb2_max_credits);
+ server_conf.max_inflight_req =
+ req->smb2_max_credits;
+ }
if (req->smbd_max_io_size)
init_smbd_max_io_size(req->smbd_max_io_size);
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
index 6cdc873ac907..aa5233b1eba9 100644
--- a/include/clocksource/hyperv_timer.h
+++ b/include/clocksource/hyperv_timer.h
@@ -38,6 +38,8 @@ extern void hv_remap_tsc_clocksource(void);
extern unsigned long hv_get_tsc_pfn(void);
extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
+extern void hv_adj_sched_clock_offset(u64 offset);
+
static __always_inline bool
hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
u64 *cur_tsc, u64 *time)
diff --git a/include/dt-bindings/sound/qcom,wcd9335.h b/include/dt-bindings/sound/qcom,wcd9335.h
index f5e9f1db091e..4fc68aeb9e04 100644
--- a/include/dt-bindings/sound/qcom,wcd9335.h
+++ b/include/dt-bindings/sound/qcom,wcd9335.h
@@ -10,6 +10,5 @@
#define AIF3_PB 4
#define AIF3_CAP 5
#define AIF4_PB 6
-#define NUM_CODEC_DAIS 7
#endif
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 7c0786bdf9af..0bbbe537c5f9 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -63,7 +63,12 @@ static inline void set_codetag_empty(union codetag_ref *ref)
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
-static inline void set_codetag_empty(union codetag_ref *ref) {}
+
+static inline void set_codetag_empty(union codetag_ref *ref)
+{
+ if (ref)
+ ref->ct = NULL;
+}
#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
@@ -135,7 +140,7 @@ static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
- WARN_ONCE(ref && ref->ct,
+ WARN_ONCE(ref && ref->ct && !is_codetag_empty(ref),
"alloc_tag was not cleared (got tag for %s:%u)\n",
ref->ct->filename, ref->ct->lineno);
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index a28e2a6a13d0..74169dd0f659 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -166,9 +166,12 @@ static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
return dev_get_drvdata(&fdev->dev);
}
+struct ffa_partition_info;
+
#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
- const struct ffa_ops *ops);
+struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops);
void ffa_device_unregister(struct ffa_device *ffa_dev);
int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
const char *mod_name);
@@ -176,9 +179,9 @@ void ffa_driver_unregister(struct ffa_driver *driver);
bool ffa_device_is_valid(struct ffa_device *ffa_dev);
#else
-static inline
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
- const struct ffa_ops *ops)
+static inline struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops)
{
return NULL;
}
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 108060612bb8..7ad736538649 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -155,8 +155,14 @@ static inline int get_cpu_cacheinfo_id(int cpu, int level)
#ifndef CONFIG_ARCH_HAS_CPU_CACHE_ALIASING
#define cpu_dcache_is_aliasing() false
+#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing()
#else
#include <asm/cachetype.h>
+
+#ifndef cpu_icache_is_aliasing
+#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing()
+#endif
+
#endif
#endif /* _LINUX_CACHEINFO_H */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 469a64dd6495..240c632c5b95 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -216,28 +216,43 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif /* __KERNEL__ */
+/**
+ * offset_to_ptr - convert a relative memory offset to an absolute pointer
+ * @off: the address of the 32-bit offset value
+ */
+static inline void *offset_to_ptr(const int *off)
+{
+ return (void *)((unsigned long)off + *off);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_64BIT
+#define ARCH_SEL(a,b) a
+#else
+#define ARCH_SEL(a,b) b
+#endif
+
/*
* Force the compiler to emit 'sym' as a symbol, so that we can reference
* it from inline assembler. Necessary in case 'sym' could be inlined
* otherwise, or eliminated entirely due to lack of references that are
* visible to the compiler.
*/
-#define ___ADDRESSABLE(sym, __attrs) \
- static void * __used __attrs \
+#define ___ADDRESSABLE(sym, __attrs) \
+ static void * __used __attrs \
__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym;
+
#define __ADDRESSABLE(sym) \
___ADDRESSABLE(sym, __section(".discard.addressable"))
-/**
- * offset_to_ptr - convert a relative memory offset to an absolute pointer
- * @off: the address of the 32-bit offset value
- */
-static inline void *offset_to_ptr(const int *off)
-{
- return (void *)((unsigned long)off + *off);
-}
+#define __ADDRESSABLE_ASM(sym) \
+ .pushsection .discard.addressable,"aw"; \
+ .align ARCH_SEL(8,4); \
+ ARCH_SEL(.quad, .long) __stringify(sym); \
+ .popsection;
-#endif /* __ASSEMBLY__ */
+#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym))
#ifdef __CHECKER__
#define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index b137fdb56093..346251bf1026 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -84,7 +84,7 @@ enum dma_transfer_direction {
DMA_TRANS_NONE,
};
-/**
+/*
* Interleaved Transfer Request
* ----------------------------
 * A chunk is a collection of contiguous bytes to be transferred.
@@ -223,7 +223,7 @@ enum sum_check_bits {
};
/**
- * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
* @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
* @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
*/
@@ -286,7 +286,7 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
* pointer to the engine's metadata area
* 4. Read out the metadata from the pointer
*
- * Note: the two mode is not compatible and clients must use one mode for a
+ * Warning: the two modes are not compatible and clients must use one mode for a
* descriptor.
*/
enum dma_desc_metadata_mode {
@@ -594,9 +594,13 @@ struct dma_descriptor_metadata_ops {
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
+ * @desc_free: driver's callback function to free a reusable descriptor
+ * after completion
* descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
+ * @callback_result: error result from a DMA transaction
* @callback_param: general parameter to pass to the callback routine
+ * @unmap: hook for generic DMA unmap data
* @desc_metadata_mode: core managed metadata mode to protect mixed use of
* DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
* DESC_METADATA_NONE
@@ -827,6 +831,9 @@ struct dma_filter {
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_prep_peripheral_dma_vec: prepares a scatter-gather DMA transfer,
+ * where the address and size of each segment is located in one entry of
+ * the dma_vec array.
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
* The function takes a buffer of size buf_len. The callback function will
diff --git a/include/linux/firmware/cirrus/cs_dsp_test_utils.h b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
new file mode 100644
index 000000000000..4f87a908ab4f
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Support utilities for cs_dsp testing.
+ *
+ * Copyright (C) 2024 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/regmap.h>
+#include <linux/firmware/cirrus/wmfw.h>
+
+struct kunit;
+struct cs_dsp_test;
+struct cs_dsp_test_local;
+
+/**
+ * struct cs_dsp_test - base class for test utilities
+ *
+ * @test: Pointer to struct kunit instance.
+ * @dsp: Pointer to struct cs_dsp instance.
+ * @local: Private data for each test suite.
+ */
+struct cs_dsp_test {
+ struct kunit *test;
+ struct cs_dsp *dsp;
+
+ struct cs_dsp_test_local *local;
+
+ /* Following members are private */
+ bool saw_bus_write;
+};
+
+/**
+ * struct cs_dsp_mock_alg_def - Info for creating a mock algorithm entry.
+ *
+ * @id:	Algorithm ID.
+ * @ver:	Algorithm version.
+ * @xm_base_words:	XM base address in DSP words.
+ * @xm_size_words:	XM size in DSP words.
+ * @ym_base_words:	YM base address in DSP words.
+ * @ym_size_words:	YM size in DSP words.
+ * @zm_base_words:	ZM base address in DSP words.
+ * @zm_size_words:	ZM size in DSP words.
+ */
+struct cs_dsp_mock_alg_def {
+ unsigned int id;
+ unsigned int ver;
+ unsigned int xm_base_words;
+ unsigned int xm_size_words;
+ unsigned int ym_base_words;
+ unsigned int ym_size_words;
+ unsigned int zm_base_words;
+ unsigned int zm_size_words;
+};
+
+struct cs_dsp_mock_coeff_def {
+ const char *shortname;
+ const char *fullname;
+ const char *description;
+ u16 type;
+ u16 flags;
+ u16 mem_type;
+ unsigned int offset_dsp_words;
+ unsigned int length_bytes;
+};
+
+/**
+ * struct cs_dsp_mock_xm_header - XM header builder
+ *
+ * @test_priv: Pointer to the struct cs_dsp_test.
+ * @blob_data: Pointer to the created blob data.
+ * @blob_size_bytes: Size of the data at blob_data.
+ */
+struct cs_dsp_mock_xm_header {
+ struct cs_dsp_test *test_priv;
+ void *blob_data;
+ size_t blob_size_bytes;
+};
+
+struct cs_dsp_mock_wmfw_builder;
+struct cs_dsp_mock_bin_builder;
+
+extern const unsigned int cs_dsp_mock_adsp2_32bit_sysbase;
+extern const unsigned int cs_dsp_mock_adsp2_16bit_sysbase;
+extern const unsigned int cs_dsp_mock_halo_core_base;
+extern const unsigned int cs_dsp_mock_halo_sysinfo_base;
+
+extern const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[];
+extern const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[];
+extern const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[];
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes);
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type);
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv);
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type);
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv);
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type);
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words);
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type);
+unsigned int cs_dsp_mock_xm_header_get_fw_version_from_regmap(struct cs_dsp_test *priv);
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header);
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv);
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header);
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs);
+
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv);
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg);
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs);
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes);
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv);
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs);
+
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version);
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info);
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name);
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes);
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder);
+
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version);
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info);
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description);
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def);
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder);
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder);
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder);
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 0d99bf11d260..e4ce1cae03bf 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -616,6 +616,12 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
return false;
}
+/*
+ * To work around what seems to be an optimizer bug, the macro arguments
+ * need to have const copies or the values end up changed by the time they
+ * reach fortify_warn_once(). See commit 6f7630b1b5bc ("fortify: Capture
+ * __bos() results in const temp vars") for more details.
+ */
#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
p_size_field, q_size_field, op) ({ \
const size_t __fortify_size = (size_t)(size); \
@@ -623,6 +629,8 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
const size_t __q_size = (q_size); \
const size_t __p_size_field = (p_size_field); \
const size_t __q_size_field = (q_size_field); \
+ /* Keep a mutable version of the size for the final copy. */ \
+ size_t __copy_size = __fortify_size; \
fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \
__q_size, __p_size_field, \
__q_size_field, FORTIFY_FUNC_ ##op), \
@@ -630,7 +638,11 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
__fortify_size, \
"field \"" #p "\" at " FILE_LINE, \
__p_size_field); \
- __underlying_##op(p, q, __fortify_size); \
+ /* Hide only the run-time size from value range tracking to */ \
+ /* silence compile-time false positive bounds warnings. */ \
+ if (!__builtin_constant_p(__copy_size)) \
+ OPTIMIZER_HIDE_VAR(__copy_size); \
+ __underlying_##op(p, q, __copy_size); \
})
/*
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 6e452bd8e7e3..5c6bea81a90e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -224,7 +224,13 @@ static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
unsigned long vaddr)
{
- return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr);
+ struct folio *folio;
+
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
+ if (folio && user_alloc_needs_zeroing())
+ clear_user_highpage(&folio->page, vaddr);
+
+ return folio;
}
#endif
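
The highmem.h hunk above drops the unconditional __GFP_ZERO and instead calls clear_user_highpage() only when user_alloc_needs_zeroing() (added by the mm.h hunk later in this section) reports that the allocator's init-on-alloc zeroing cannot be relied on, i.e. on aliasing caches or with init_on_alloc disabled. A usage sketch mirroring the new call pattern for a user-facing allocation site (the helper name is illustrative):

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>

static struct folio *alloc_user_folio_zeroed(struct vm_area_struct *vma,
					     unsigned long vaddr)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
	/* Zero by hand only if the allocator did not already do it safely. */
	if (folio && user_alloc_needs_zeroing())
		clear_user_highpage(&folio->page, vaddr);

	return folio;
}
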
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 22c22fb91042..02a226bcf0ed 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1559,6 +1559,7 @@ struct hv_util_service {
void *channel;
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
+ int (*util_init_transport)(void);
void (*util_deinit)(void);
int (*util_pre_suspend)(void);
int (*util_pre_resume)(void);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index c1645c86eed9..d65b5d71b93b 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -585,13 +585,16 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
* vlan_get_protocol - get protocol EtherType.
* @skb: skbuff to query
* @type: first vlan protocol
+ * @mac_offset: MAC offset
* @depth: buffer to store length of eth and vlan tags in bytes
*
* Returns the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
- int *depth)
+static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
+ __be16 type,
+ int mac_offset,
+ int *depth)
{
unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
@@ -610,7 +613,8 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
do {
struct vlan_hdr vhdr, *vh;
- vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
+ vh = skb_header_pointer(skb, mac_offset + vlan_depth,
+ sizeof(vhdr), &vhdr);
if (unlikely(!vh || !--parse_depth))
return 0;
@@ -625,6 +629,12 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
return type;
}
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+ int *depth)
+{
+ return __vlan_get_protocol_offset(skb, type, 0, depth);
+}
+
/**
* vlan_get_protocol - get protocol EtherType.
* @skb: skbuff to query
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index e123d5e17b52..85fe4e6b275c 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -15,10 +15,8 @@ bool io_is_uring_fops(struct file *file);
static inline void io_uring_files_cancel(void)
{
- if (current->io_uring) {
- io_uring_unreg_ringfd();
+ if (current->io_uring)
__io_uring_cancel(false);
- }
}
static inline void io_uring_task_cancel(void)
{
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 011860ade268..fd4cdb0860a2 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -345,7 +345,7 @@ struct io_ring_ctx {
/* timeouts */
struct {
- spinlock_t timeout_lock;
+ raw_spinlock_t timeout_lock;
struct list_head timeout_list;
struct list_head ltimeout_list;
unsigned cq_last_tm_flush;
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index 3f2cf339ceaf..d437e3070850 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -7,6 +7,7 @@
#ifdef CONFIG_MEMFD_CREATE
extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
+unsigned int *memfd_file_seals_ptr(struct file *file);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
@@ -16,6 +17,19 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
{
return ERR_PTR(-EINVAL);
}
+
+static inline unsigned int *memfd_file_seals_ptr(struct file *file)
+{
+ return NULL;
+}
#endif
+/* Retrieve memfd seals associated with the file, if any. */
+static inline unsigned int memfd_file_seals(struct file *file)
+{
+ unsigned int *sealsp = memfd_file_seals_ptr(file);
+
+ return sealsp ? *sealsp : 0;
+}
+
#endif /* __LINUX_MEMFD_H */
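
The memfd.h hunk above makes memfd_file_seals_ptr() available in both configurations and adds the memfd_file_seals() convenience wrapper; together with is_readonly_sealed() from the mm.h hunk later in this section, an mmap path can decide whether a write-sealed memfd mapping must lose its write permissions. A hedged usage sketch (the helper name is illustrative):

#include <linux/fs.h>
#include <linux/memfd.h>
#include <linux/mm.h>

static bool mapping_must_stay_readonly(struct file *file, vm_flags_t vm_flags)
{
	/* memfd_file_seals() returns 0 when the file is not a memfd. */
	unsigned int seals = memfd_file_seals(file);

	return is_readonly_sealed(seals, vm_flags);
}
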
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index fc7e6153b73d..ea48eb879a0f 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -524,6 +524,7 @@ enum {
* creation/deletion on drivers rescan. Unset during device attach.
*/
MLX5_PRIV_FLAGS_DETACH = 1 << 2,
+ MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3,
};
struct mlx5_adev {
@@ -1202,6 +1203,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_VF;
}
+static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1,
+ const struct mlx5_core_dev *dev2)
+{
+ return dev1->coredev_type == dev2->coredev_type;
+}
+
static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 4fbbcf35498b..48d47181c7cd 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -2119,7 +2119,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 migration_in_chunks[0x1];
u8 reserved_at_d1[0x1];
u8 sf_eq_usage[0x1];
- u8 reserved_at_d3[0xd];
+ u8 reserved_at_d3[0x5];
+ u8 multiplane[0x1];
+ u8 reserved_at_d9[0x7];
u8 cross_vhca_object_to_object_supported[0x20];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c39c4945946c..b1c3db9cf355 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -31,6 +31,7 @@
#include <linux/kasan.h>
#include <linux/memremap.h>
#include <linux/slab.h>
+#include <linux/cacheinfo.h>
struct mempolicy;
struct anon_vma;
@@ -3010,7 +3011,15 @@ static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}
-pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
+pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
+static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
+ pmd_t *pmdvalp)
+{
+ pte_t *pte;
+
+ __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
+ return pte;
+}
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{
return __pte_offset_map(pmd, addr, NULL);
@@ -3023,7 +3032,8 @@ static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
{
pte_t *pte;
- __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp));
+ __cond_lock(RCU, __cond_lock(*ptlp,
+ pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
return pte;
}
@@ -3115,6 +3125,7 @@ static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
if (!pmd_ptlock_init(ptdesc))
return false;
__folio_set_pgtable(folio);
+ ptdesc_pmd_pts_init(ptdesc);
lruvec_stat_add_folio(folio, NR_PAGETABLE);
return true;
}
@@ -4091,6 +4102,37 @@ void mem_dump_obj(void *object);
static inline void mem_dump_obj(void *object) {}
#endif
+static inline bool is_write_sealed(int seals)
+{
+ return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
+}
+
+/**
+ * is_readonly_sealed - Checks whether the mapping is write-sealed but mapped
+ *                      read-only, in which case writes should be disallowed
+ *                      going forward.
+ * @seals: the seals to check
+ * @vm_flags: the VMA flags to check
+ *
+ * Returns whether the mapping is read-only sealed, in which case writes
+ * should be disallowed going forward.
+ */
+static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags)
+{
+ /*
+ * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
+ * MAP_SHARED and read-only, take care to not allow mprotect to
+ * revert protections on such mappings. Do this only for shared
+ * mappings. For private mappings, don't need to mask
+ * VM_MAYWRITE as we still want them to be COW-writable.
+ */
+ if (is_write_sealed(seals) &&
+ ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED))
+ return true;
+
+ return false;
+}
+
/**
* seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
* handle them.
@@ -4102,24 +4144,15 @@ static inline void mem_dump_obj(void *object) {}
*/
static inline int seal_check_write(int seals, struct vm_area_struct *vma)
{
- if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
- /*
- * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
- * write seals are active.
- */
- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
- return -EPERM;
-
- /*
- * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
- * MAP_SHARED and read-only, take care to not allow mprotect to
- * revert protections on such mappings. Do this only for shared
- * mappings. For private mappings, don't need to mask
- * VM_MAYWRITE as we still want them to be COW-writable.
- */
- if (vma->vm_flags & VM_SHARED)
- vm_flags_clear(vma, VM_MAYWRITE);
- }
+ if (!is_write_sealed(seals))
+ return 0;
+
+ /*
+ * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+ * write seals are active.
+ */
+ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+ return -EPERM;
return 0;
}
@@ -4175,6 +4208,23 @@ static inline int do_mseal(unsigned long start, size_t len_in, unsigned long fla
}
#endif
+/*
+ * user_alloc_needs_zeroing checks if a user folio from the page allocator
+ * needs to be zeroed or not.
+ */
+static inline bool user_alloc_needs_zeroing(void)
+{
+ /*
+	 * For user folios, an arch with cache aliasing requires a cache flush,
+	 * and arc changes folio->flags to keep the icache coherent with the
+	 * dcache, so always return true to make the caller use
+	 * clear_user_page()/clear_user_highpage().
+ */
+ return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() ||
+ !static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+ &init_on_alloc);
+}
+
int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7361a8f3ab68..332cee285662 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -445,6 +445,7 @@ FOLIO_MATCH(compound_head, _head_2a);
* @pt_index: Used for s390 gmap.
* @pt_mm: Used for x86 pgds.
* @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
+ * @pt_share_count: Used for HugeTLB PMD page table share count.
* @_pt_pad_2: Padding to ensure proper alignment.
* @ptl: Lock for the page table.
* @__page_type: Same as page->page_type. Unused for page tables.
@@ -471,6 +472,9 @@ struct ptdesc {
pgoff_t pt_index;
struct mm_struct *pt_mm;
atomic_t pt_frag_refcount;
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+ atomic_t pt_share_count;
+#endif
};
union {
@@ -516,6 +520,32 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
const struct page *: (const struct ptdesc *)(p), \
struct page *: (struct ptdesc *)(p)))
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+{
+ atomic_set(&ptdesc->pt_share_count, 0);
+}
+
+static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc)
+{
+ atomic_inc(&ptdesc->pt_share_count);
+}
+
+static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
+{
+ atomic_dec(&ptdesc->pt_share_count);
+}
+
+static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
+{
+ return atomic_read(&ptdesc->pt_share_count);
+}
+#else
+static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+{
+}
+#endif
+
/*
* Used for sizing the vmemmap region on some architectures
*/
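
The mm_types.h hunk above adds an atomic pt_share_count to struct ptdesc (under CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) with init/inc/dec/count helpers, so hugetlb PMD sharing can track how many users share a page table page; the counter is initialised from pagetable_pmd_ctor() in the mm.h hunk above. A hedged sketch of how such a counter is typically consulted, assuming the config option is enabled and the caller holds the relevant page table lock:

#include <linux/mm_types.h>

static void pmd_table_share(struct ptdesc *ptdesc)
{
	ptdesc_pmd_pts_inc(ptdesc);		/* one more sharer */
}

static bool pmd_table_unshare(struct ptdesc *ptdesc)
{
	ptdesc_pmd_pts_dec(ptdesc);
	/* True once the last sharer is gone and the table can be torn down. */
	return ptdesc_pmd_pts_count(ptdesc) == 0;
}
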
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index cf46ac720802..691506bdf2c5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -862,18 +862,10 @@ static inline void ClearPageCompound(struct page *page)
ClearPageHead(page);
}
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
-FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
-/*
- * PG_partially_mapped is protected by deferred_split split_queue_lock,
- * so its safe to use non-atomic set/clear.
- */
-__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
-__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
+FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
FOLIO_FLAG_FALSE(large_rmappable)
-FOLIO_TEST_FLAG_FALSE(partially_mapped)
-__FOLIO_SET_FLAG_NOOP(partially_mapped)
-__FOLIO_CLEAR_FLAG_NOOP(partially_mapped)
+FOLIO_FLAG_FALSE(partially_mapped)
#endif
#define PG_head_mask ((1UL << PG_head))
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 35842d1e3879..5b520fe86b60 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -221,10 +221,7 @@ do { \
} while (0)
#define PERCPU_PTR(__p) \
-({ \
- unsigned long __pcpu_ptr = (__force unsigned long)(__p); \
- (typeof(*(__p)) __force __kernel *)(__pcpu_ptr); \
-})
+ (typeof(*(__p)) __force __kernel *)((__force unsigned long)(__p))
#ifdef CONFIG_SMP
diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h
index 576d952f97ed..967a6ef31cf9 100644
--- a/include/linux/platform_data/amd_qdma.h
+++ b/include/linux/platform_data/amd_qdma.h
@@ -26,11 +26,13 @@ struct dma_slave_map;
* @max_mm_channels: Maximum number of MM DMA channels in each direction
* @device_map: DMA slave map
* @irq_index: The index of first IRQ
+ * @dma_dev: The device pointer for dma operations
*/
struct qdma_platdata {
u32 max_mm_channels;
u32 irq_index;
struct dma_slave_map *device_map;
+ struct device *dma_dev;
};
#endif /* _PLATDATA_AMD_QDMA_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 66b311fbd5d6..64934e0830af 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1637,8 +1637,9 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
* We're lying here, but rather than expose a completely new task state
* to userspace, we can make this appear as if the task has gone through
* a regular rt_mutex_lock() call.
+ * Report frozen tasks as uninterruptible.
*/
- if (tsk_state & TASK_RTLOCK_WAIT)
+ if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN))
state = TASK_UNINTERRUPTIBLE;
return fls(state);
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index d9b03e0746e7..2cbe0c22a32f 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -317,17 +317,22 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
}
-static inline void sk_psock_queue_msg(struct sk_psock *psock,
+static inline bool sk_psock_queue_msg(struct sk_psock *psock,
struct sk_msg *msg)
{
+ bool ret;
+
spin_lock_bh(&psock->ingress_lock);
- if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
list_add_tail(&msg->list, &psock->ingress_msg);
- else {
+ ret = true;
+ } else {
sk_msg_free(psock->sk, msg);
kfree(msg);
+ ret = false;
}
spin_unlock_bh(&psock->ingress_lock);
+ return ret;
}
static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index 141e6b176a1b..78a77a4ae0ea 100644
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -160,6 +160,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+extern int static_call_initialized;
+
extern int __init static_call_init(void);
extern void static_call_force_reinit(void);
@@ -225,6 +227,8 @@ extern long __static_call_return0(void);
#elif defined(CONFIG_HAVE_STATIC_CALL)
+#define static_call_initialized 0
+
static inline int static_call_init(void) { return 0; }
#define DEFINE_STATIC_CALL(name, _func) \
@@ -281,6 +285,8 @@ extern long __static_call_return0(void);
#else /* Generic implementation */
+#define static_call_initialized 0
+
static inline int static_call_init(void) { return 0; }
static inline long __static_call_return0(void)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 2a5df5b62cfc..58ad4ead33fc 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -273,7 +273,8 @@ struct trace_event_fields {
const char *name;
const int size;
const int align;
- const int is_signed;
+ const unsigned int is_signed:1;
+ unsigned int needs_test:1;
const int filter_type;
const int len;
};
@@ -324,6 +325,7 @@ enum {
TRACE_EVENT_FL_EPROBE_BIT,
TRACE_EVENT_FL_FPROBE_BIT,
TRACE_EVENT_FL_CUSTOM_BIT,
+ TRACE_EVENT_FL_TEST_STR_BIT,
};
/*
@@ -340,6 +342,7 @@ enum {
 * CUSTOM - Event is a custom event (to be attached to an existing tracepoint)
* This is set when the custom event has not been attached
* to a tracepoint yet, then it is cleared when it is.
+ * TEST_STR - The event has a "%s" that points to a string outside the event
*/
enum {
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
@@ -352,6 +355,7 @@ enum {
TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT),
TRACE_EVENT_FL_FPROBE = (1 << TRACE_EVENT_FL_FPROBE_BIT),
TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
+ TRACE_EVENT_FL_TEST_STR = (1 << TRACE_EVENT_FL_TEST_STR_BIT),
};
#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
@@ -360,7 +364,7 @@ struct trace_event_call {
struct list_head list;
struct trace_event_class *class;
union {
- char *name;
+ const char *name;
/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
struct tracepoint *tp;
};
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index a54046bf37e5..939ceabcaf06 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -15,10 +15,10 @@
#else
#define MODULE_VERMAGIC_SMP ""
#endif
-#ifdef CONFIG_PREEMPT_BUILD
-#define MODULE_VERMAGIC_PREEMPT "preempt "
-#elif defined(CONFIG_PREEMPT_RT)
+#ifdef CONFIG_PREEMPT_RT
#define MODULE_VERMAGIC_PREEMPT "preempt_rt "
+#elif defined(CONFIG_PREEMPT_BUILD)
+#define MODULE_VERMAGIC_PREEMPT "preempt "
#else
#define MODULE_VERMAGIC_PREEMPT ""
#endif
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d2761bf8ff32..9f3a04345b86 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -515,7 +515,7 @@ static inline const char *node_stat_name(enum node_stat_item item)
static inline const char *lru_list_name(enum lru_list lru)
{
- return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
+ return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_"
}
#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 4afa64c81304..0027beca5cd5 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -733,15 +733,18 @@ struct nft_set_ext_tmpl {
/**
* struct nft_set_ext - set extensions
*
- * @genmask: generation mask
+ * @genmask: generation mask, but also flags (see NFT_SET_ELEM_DEAD_BIT)
* @offset: offsets of individual extension types
* @data: beginning of extension data
+ *
+ * This structure must be aligned to word size, otherwise atomic bitops
+ * on genmask field can cause alignment failure on some archs.
*/
struct nft_set_ext {
u8 genmask;
u8 offset[NFT_SET_EXT_NUM];
char data[];
-};
+} __aligned(BITS_PER_LONG / 8);
static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
{
diff --git a/include/net/sock.h b/include/net/sock.h
index 7464e9f9f47c..c383126f691d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1527,7 +1527,7 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size)
}
static inline bool
-sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+__sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc)
{
int delta;
@@ -1535,7 +1535,13 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
return true;
delta = size - sk->sk_forward_alloc;
return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
- skb_pfmemalloc(skb);
+ pfmemalloc;
+}
+
+static inline bool
+sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
+{
+ return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb));
}
static inline int sk_unused_reserved_mem(const struct sock *sk)
diff --git a/include/sound/sdca.h b/include/sound/sdca.h
index 7e138229e8f3..973252d0adac 100644
--- a/include/sound/sdca.h
+++ b/include/sound/sdca.h
@@ -9,6 +9,9 @@
#ifndef __SDCA_H__
#define __SDCA_H__
+#include <linux/types.h>
+#include <linux/kconfig.h>
+
struct sdw_slave;
#define SDCA_MAX_FUNCTION_COUNT 8
@@ -20,9 +23,9 @@ struct sdw_slave;
* @name: human-readable string
*/
struct sdca_function_desc {
- u64 adr;
- u32 type;
const char *name;
+ u32 type;
+ u8 adr;
};
/**
diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h
index a01eec86b9a6..89e42db6d591 100644
--- a/include/sound/sdca_function.h
+++ b/include/sound/sdca_function.h
@@ -9,6 +9,8 @@
#ifndef __SDCA_FUNCTION_H__
#define __SDCA_FUNCTION_H__
+#include <linux/bits.h>
+
/*
* SDCA Function Types from SDCA specification v1.0a Section 5.1.2
* all Function types not described are reserved
@@ -40,6 +42,7 @@ enum sdca_function_type {
#define SDCA_FUNCTION_TYPE_RJ_NAME "RJ"
#define SDCA_FUNCTION_TYPE_SIMPLE_NAME "SimpleJack"
#define SDCA_FUNCTION_TYPE_HID_NAME "HID"
+#define SDCA_FUNCTION_TYPE_IMP_DEF_NAME "ImplementationDefined"
enum sdca_entity0_controls {
SDCA_CONTROL_ENTITY_0_COMMIT_GROUP_MASK = 0x01,
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 36a485571142..892f70532363 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -271,9 +271,13 @@ static inline void simple_util_debug_info(struct simple_util_priv *priv)
simple_util_debug_dai(priv, "codec", dai);
if (link->name)
- dev_dbg(dev, "dai name = %s\n", link->name);
+ dev_dbg(dev, "link name = %s\n", link->name);
if (link->dai_fmt)
- dev_dbg(dev, "dai format = %04x\n", link->dai_fmt);
+ dev_dbg(dev, "link format = %04x\n", link->dai_fmt);
+ if (link->playback_only)
+ dev_dbg(dev, "link has playback_only");
+ if (link->capture_only)
+ dev_dbg(dev, "link has capture_only");
if (props->adata.convert_rate)
dev_dbg(dev, "convert_rate = %d\n", props->adata.convert_rate);
if (props->adata.convert_channels)
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index aab57c19f62b..a11501752637 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -193,6 +193,9 @@ int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate);
+int snd_soc_dai_prepare(struct snd_soc_dai *dai,
+ struct snd_pcm_substream *substream);
+
/* Digital Audio Interface mute */
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
int direction);
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 4f5d411e3823..fcdb5adfcd5e 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -681,6 +681,17 @@ struct snd_soc_dai_link_component {
struct device_node *of_node;
const char *dai_name;
const struct of_phandle_args *dai_args;
+
+ /*
+ * Extra format = SND_SOC_DAIFMT_Bx_Fx
+ *
+ * [Note] it is Bx_Fx base, not CBx_CFx
+ *
+ * It will be used with dai_link->dai_fmt
+ * see
+ * snd_soc_runtime_set_dai_fmt()
+ */
+ unsigned int ext_fmt;
};
/*
@@ -1118,7 +1129,6 @@ struct snd_soc_card {
unsigned int instantiated:1;
unsigned int topology_shortname_created:1;
unsigned int fully_routed:1;
- unsigned int disable_route_checks:1;
unsigned int probed:1;
unsigned int component_chaining:1;
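As a loose illustration of the new ext_fmt field (the names below are invented, and SND_SOC_DAIFMT_BP_FP is simply one of the existing Bx_Fx defines), a machine driver could pre-set the extra format on a link component, to be combined with dai_link->dai_fmt via snd_soc_runtime_set_dai_fmt():

static struct snd_soc_dai_link_component demo_cpus[] = {
	{
		.dai_name = "demo-cpu-dai",		/* illustrative */
		.ext_fmt  = SND_SOC_DAIFMT_BP_FP,	/* Bx_Fx base, not CBx_CFx */
	},
};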
diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
index 0e82598e10af..36a4a1e1d8ca 100644
--- a/include/sound/soc_sdw_utils.h
+++ b/include/sound/soc_sdw_utils.h
@@ -224,6 +224,8 @@ int asoc_sdw_cs_amp_init(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_links,
struct asoc_sdw_codec_info *info,
bool playback);
+int asoc_sdw_cs_spk_feedback_rtd_init(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai);
/* MAXIM codec support */
int asoc_sdw_maxim_init(struct snd_soc_card *card,
diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
index 50589e5dd6a3..84fa8a21dfd0 100644
--- a/include/uapi/linux/mptcp_pm.h
+++ b/include/uapi/linux/mptcp_pm.h
@@ -12,31 +12,33 @@
/**
* enum mptcp_event_type
* @MPTCP_EVENT_UNSPEC: unused event
- * @MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport A new MPTCP connection has been created. It is the good time
- * to allocate memory and send ADD_ADDR if needed. Depending on the
+ * @MPTCP_EVENT_CREATED: A new MPTCP connection has been created. It is a
+ * good time to allocate memory and send ADD_ADDR if needed. Depending on the
* traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED
- * is sent.
- * @MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport A MPTCP connection is established (can start new subflows).
- * @MPTCP_EVENT_CLOSED: token A MPTCP connection has stopped.
- * @MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] A
- * new address has been announced by the peer.
- * @MPTCP_EVENT_REMOVED: token, rem_id An address has been lost by the peer.
- * @MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id, saddr4 |
- * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error] A new
- * subflow has been established. 'error' should not be set.
- * @MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
- * daddr4 | daddr6, sport, dport, backup, if_idx [, error] A subflow has been
- * closed. An error (copy of sk_err) could be set if an error has been
- * detected for this subflow.
- * @MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
- * daddr4 | daddr6, sport, dport, backup, if_idx [, error] The priority of a
- * subflow has changed. 'error' should not be set.
- * @MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6 A new PM
- * listener is created.
- * @MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6 A PM listener
- * is closed.
+ * is sent. Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ * sport, dport, server-side.
+ * @MPTCP_EVENT_ESTABLISHED: A MPTCP connection is established (can start new
+ * subflows). Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ * sport, dport, server-side.
+ * @MPTCP_EVENT_CLOSED: A MPTCP connection has stopped. Attribute: token.
+ * @MPTCP_EVENT_ANNOUNCED: A new address has been announced by the peer.
+ * Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
+ * @MPTCP_EVENT_REMOVED: An address has been lost by the peer. Attributes:
+ * token, rem_id.
+ * @MPTCP_EVENT_SUB_ESTABLISHED: A new subflow has been established. 'error'
+ * should not be set. Attributes: token, family, loc_id, rem_id, saddr4 |
+ * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error].
+ * @MPTCP_EVENT_SUB_CLOSED: A subflow has been closed. An error (copy of
+ * sk_err) could be set if an error has been detected for this subflow.
+ * Attributes: token, family, loc_id, rem_id, saddr4 | saddr6, daddr4 |
+ * daddr6, sport, dport, backup, if_idx [, error].
+ * @MPTCP_EVENT_SUB_PRIORITY: The priority of a subflow has changed. 'error'
+ * should not be set. Attributes: token, family, loc_id, rem_id, saddr4 |
+ * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error].
+ * @MPTCP_EVENT_LISTENER_CREATED: A new PM listener is created. Attributes:
+ * family, sport, saddr4 | saddr6.
+ * @MPTCP_EVENT_LISTENER_CLOSED: A PM listener is closed. Attributes: family,
+ * sport, saddr4 | saddr6.
*/
enum mptcp_event_type {
MPTCP_EVENT_UNSPEC,
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 58154117d9b0..a6fce46aeb37 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -8,6 +8,13 @@
#define __always_inline inline
#endif
+/* Not all C++ standards support type declarations inside an anonymous union */
+#ifndef __cplusplus
+#define __struct_group_tag(TAG) TAG
+#else
+#define __struct_group_tag(TAG)
+#endif
+
/**
 * __struct_group() - Create a mirrored named and anonymous struct
*
@@ -20,13 +27,13 @@
* and size: one anonymous and one named. The former's members can be used
* normally without sub-struct naming, and the latter can be used to
* reason about the start, end, and size of the group of struct members.
- * The named struct can also be explicitly tagged for layer reuse, as well
- * as both having struct attributes appended.
+ * The named struct can also be explicitly tagged for layer reuse (C only),
+ * as well as both having struct attributes appended.
*/
#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
union { \
struct { MEMBERS } ATTRS; \
- struct TAG { MEMBERS } ATTRS NAME; \
+ struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
} ATTRS
#ifdef __cplusplus
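Since the kerneldoc above is terse, here is a small self-contained user-space sketch of what __struct_group() gives you; the struct and member names are invented, and the macro bodies simply mirror the patched definitions:

#include <stdio.h>
#include <string.h>

#ifndef __cplusplus
#define __struct_group_tag(TAG)	TAG
#else
#define __struct_group_tag(TAG)
#endif

#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
	union { \
		struct { MEMBERS } ATTRS; \
		struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
	} ATTRS

struct packet {
	int id;
	/* one anonymous view and one named view of the same two members */
	__struct_group(pkt_hdr, hdr, /* no attrs */, int src; int dst;);
};

int main(void)
{
	struct packet p = { .id = 1 };
	struct pkt_hdr *h = &p.hdr;	/* C only: the explicit tag can be reused */

	p.src = 2;			/* access through the anonymous struct */
	h->dst = 3;			/* access through the named, tagged group */
	printf("%d %d %d\n", p.id, p.hdr.src, p.dst);	/* prints "1 2 3" */
	memset(&p.hdr, 0, sizeof(p.hdr));		/* operate on the group as a whole */
	return 0;
}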
diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h
index ba8604bdf206..349718c271eb 100644
--- a/include/uapi/linux/thermal.h
+++ b/include/uapi/linux/thermal.h
@@ -3,8 +3,8 @@
#define _UAPI_LINUX_THERMAL_H
#define THERMAL_NAME_LENGTH 20
-#define THERMAL_THRESHOLD_WAY_UP BIT(0)
-#define THERMAL_THRESHOLD_WAY_DOWN BIT(1)
+#define THERMAL_THRESHOLD_WAY_UP 0x1
+#define THERMAL_THRESHOLD_WAY_DOWN 0x2
enum thermal_device_mode {
THERMAL_DEVICE_DISABLED = 0,
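BIT() is a kernel-internal helper that user space cannot rely on, which is presumably why the UAPI header now spells these as plain literals; the values themselves are unchanged (BIT(0) == 0x1, BIT(1) == 0x2). A trivial user-space use, purely illustrative and assuming installed UAPI headers:

#include <stdio.h>
#include <linux/thermal.h>

int main(void)
{
	unsigned int ways = THERMAL_THRESHOLD_WAY_UP | THERMAL_THRESHOLD_WAY_DOWN;

	if (ways & THERMAL_THRESHOLD_WAY_UP)
		printf("notify when the threshold is crossed upward\n");
	if (ways & THERMAL_THRESHOLD_WAY_DOWN)
		printf("notify when the threshold is crossed downward\n");
	return 0;
}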
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index ddc77322d571..bc7648a30746 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -334,6 +334,14 @@ union snd_codec_options {
struct snd_dec_wma wma_d;
struct snd_dec_alac alac_d;
struct snd_dec_ape ape_d;
+ struct {
+ __u32 out_sample_rate;
+ } src_d;
+} __attribute__((packed, aligned(4)));
+
+struct snd_codec_desc_src {
+ __u32 out_sample_rate_min;
+ __u32 out_sample_rate_max;
} __attribute__((packed, aligned(4)));
/** struct snd_codec_desc - description of codec capabilities
@@ -347,6 +355,9 @@ union snd_codec_options {
* @modes: Supported modes. See SND_AUDIOMODE defines
* @formats: Supported formats. See SND_AUDIOSTREAMFORMAT defines
* @min_buffer: Minimum buffer size handled by codec implementation
+ * @pcm_formats: Output (for decoders) or input (for encoders)
+ * PCM formats (required for accel mode, 0 for other modes)
+ * @u_space: union space (for codec dependent data)
* @reserved: reserved for future use
*
* This structure provides a scalar value for profiles, modes and stream
@@ -370,7 +381,12 @@ struct snd_codec_desc {
__u32 modes;
__u32 formats;
__u32 min_buffer;
- __u32 reserved[15];
+ __u32 pcm_formats;
+ union {
+ __u32 u_space[6];
+ struct snd_codec_desc_src src;
+ } __attribute__((packed, aligned(4)));
+ __u32 reserved[8];
} __attribute__((packed, aligned(4)));
/** struct snd_codec
@@ -395,6 +411,8 @@ struct snd_codec_desc {
* @align: Block alignment in bytes of an audio sample.
* Only required for PCM or IEC formats.
* @options: encoder-specific settings
+ * @pcm_format: Output (for decoders) or input (for encoders)
+ * PCM formats (required for accel mode, 0 for other modes)
* @reserved: reserved for future use
*/
@@ -411,7 +429,8 @@ struct snd_codec {
__u32 format;
__u32 align;
union snd_codec_options options;
- __u32 reserved[3];
+ __u32 pcm_format;
+ __u32 reserved[2];
} __attribute__((packed, aligned(4)));
#endif
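A rough sketch of how the new fields might be filled for an accel-mode sample-rate-converter stream; the rates are arbitrary, the codec id and the rest of the compress-offload setup are omitted, and only the members added above come from the patch:

#include <string.h>
#include <sound/compress_params.h>

static void demo_fill_src_codec(struct snd_codec *codec)
{
	memset(codec, 0, sizeof(*codec));
	codec->sample_rate = 48000;			/* input rate */
	codec->pcm_format = 2;				/* e.g. SNDRV_PCM_FORMAT_S16_LE */
	codec->options.src_d.out_sample_rate = 44100;	/* requested output rate */
}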
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 0a246bc218d3..c28c766270de 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -153,6 +153,8 @@
/* Stream */
#define SOF_TKN_STREAM_PLAYBACK_COMPATIBLE_D0I3 1200
#define SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3 1201
+#define SOF_TKN_STREAM_PLAYBACK_PAUSE_SUPPORTED 1202
+#define SOF_TKN_STREAM_CAPTURE_PAUSE_SUPPORTED 1203
/* Led control for mute switches */
#define SOF_TKN_MUTE_LED_USE 1300
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 06ff41484e29..d3403c8216db 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -215,9 +215,9 @@ bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
struct io_ring_ctx *ctx = head->ctx;
/* protect against races with linked timeouts */
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
matched = io_match_linked(head);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
} else {
matched = io_match_linked(head);
}
@@ -333,7 +333,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
init_waitqueue_head(&ctx->cq_wait);
init_waitqueue_head(&ctx->poll_wq);
spin_lock_init(&ctx->completion_lock);
- spin_lock_init(&ctx->timeout_lock);
+ raw_spin_lock_init(&ctx->timeout_lock);
INIT_WQ_LIST(&ctx->iopoll_list);
INIT_LIST_HEAD(&ctx->io_buffers_comp);
INIT_LIST_HEAD(&ctx->defer_list);
@@ -498,10 +498,10 @@ static void io_prep_async_link(struct io_kiocb *req)
if (req->flags & REQ_F_LINK_TIMEOUT) {
struct io_ring_ctx *ctx = req->ctx;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
io_for_each_link(cur, req)
io_prep_async_work(cur);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
} else {
io_for_each_link(cur, req)
io_prep_async_work(cur);
@@ -514,7 +514,11 @@ static void io_queue_iowq(struct io_kiocb *req)
struct io_uring_task *tctx = req->tctx;
BUG_ON(!tctx);
- BUG_ON(!tctx->io_wq);
+
+ if ((current->flags & PF_KTHREAD) || !tctx->io_wq) {
+ io_req_task_queue_fail(req, -ECANCELED);
+ return;
+ }
/* init ->work of the whole link before punting */
io_prep_async_link(req);
@@ -3214,6 +3218,7 @@ end_wait:
void __io_uring_cancel(bool cancel_all)
{
+ io_uring_unreg_ringfd();
io_uring_cancel_generic(cancel_all, NULL);
}
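A general note on the lock-type switch above (not a claim about this particular commit's motivation): on PREEMPT_RT a spinlock_t turns into a sleeping lock, while a raw_spinlock_t keeps spinning with preemption and interrupts disabled, so it stays usable from contexts that must never sleep. Minimal illustrative usage with invented demo_* names:

static raw_spinlock_t demo_lock;

static void demo_init(void)
{
	raw_spin_lock_init(&demo_lock);
}

static void demo_touch_from_atomic_context(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_lock, flags);
	/* short, non-sleeping critical section only */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}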
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index d407576ddfb7..eec5eb7de843 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -139,6 +139,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
struct io_uring_buf_ring *br = bl->buf_ring;
__u16 tail, head = bl->head;
struct io_uring_buf *buf;
+ void __user *ret;
tail = smp_load_acquire(&br->tail);
if (unlikely(tail == head))
@@ -153,6 +154,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
req->buf_list = bl;
req->buf_index = buf->bid;
+ ret = u64_to_user_ptr(buf->addr);
if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
/*
@@ -168,7 +170,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
io_kbuf_commit(req, bl, *len, 1);
req->buf_list = NULL;
}
- return u64_to_user_ptr(buf->addr);
+ return ret;
}
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
diff --git a/io_uring/net.c b/io_uring/net.c
index df1f7dc6f1c8..c6cd38cc5dc4 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -754,6 +754,7 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
if (req->opcode == IORING_OP_RECV) {
kmsg->msg.msg_name = NULL;
kmsg->msg.msg_namelen = 0;
+ kmsg->msg.msg_inq = 0;
kmsg->msg.msg_control = NULL;
kmsg->msg.msg_get_inq = 1;
kmsg->msg.msg_controllen = 0;
diff --git a/io_uring/register.c b/io_uring/register.c
index 1e99c783abdf..fdd44914c39c 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -414,6 +414,9 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
if (ctx->flags & IORING_SETUP_SINGLE_ISSUER &&
current != ctx->submitter_task)
return -EEXIST;
+ /* limited to DEFER_TASKRUN for now */
+ if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+ return -EINVAL;
if (copy_from_user(&p, arg, sizeof(p)))
return -EFAULT;
if (p.flags & ~RESIZE_FLAGS)
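Seen from user space, a ring that may be resized later now has to be created with IORING_SETUP_DEFER_TASKRUN (which itself requires IORING_SETUP_SINGLE_ISSUER), otherwise the resize registration fails with -EINVAL per the check above. A minimal liburing-based sketch, error handling omitted:

#include <liburing.h>

static int demo_make_resizable_ring(struct io_uring *ring)
{
	return io_uring_queue_init(64, ring,
				   IORING_SETUP_SINGLE_ISSUER |
				   IORING_SETUP_DEFER_TASKRUN);
}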
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 0bcb83e4ce3c..29bb3010f9c0 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -983,6 +983,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
io_kbuf_recycle(req, issue_flags);
if (ret < 0)
req_set_fail(req);
+ } else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
+ cflags = io_put_kbuf(req, ret, issue_flags);
} else {
/*
* Any successful return value will keep the multishot read
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 6df5e649c413..9e5bd79fd2b5 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -405,6 +405,7 @@ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
+ struct task_struct *task_to_put = NULL;
int ret;
/* Retain compatibility with failing for an invalid attach attempt */
@@ -480,6 +481,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
}
sqd->thread = tsk;
+ task_to_put = get_task_struct(tsk);
ret = io_uring_alloc_task_context(tsk, ctx);
wake_up_new_task(tsk);
if (ret)
@@ -490,11 +492,15 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
goto err;
}
+ if (task_to_put)
+ put_task_struct(task_to_put);
return 0;
err_sqpoll:
complete(&ctx->sq_data->exited);
err:
io_sq_thread_finish(ctx);
+ if (task_to_put)
+ put_task_struct(task_to_put);
return ret;
}
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index f3d502717aeb..362689b17ccc 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -74,10 +74,10 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
if (!io_timeout_finish(timeout, data)) {
if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
/* re-arm timer */
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
list_add(&timeout->list, ctx->timeout_list.prev);
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
return;
}
}
@@ -85,7 +85,27 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
io_req_task_complete(req, ts);
}
-static bool io_kill_timeout(struct io_kiocb *req, int status)
+static __cold bool io_flush_killed_timeouts(struct list_head *list, int err)
+{
+ if (list_empty(list))
+ return false;
+
+ while (!list_empty(list)) {
+ struct io_timeout *timeout;
+ struct io_kiocb *req;
+
+ timeout = list_first_entry(list, struct io_timeout, list);
+ list_del_init(&timeout->list);
+ req = cmd_to_io_kiocb(timeout);
+ if (err)
+ req_set_fail(req);
+ io_req_queue_tw_complete(req, err);
+ }
+
+ return true;
+}
+
+static void io_kill_timeout(struct io_kiocb *req, struct list_head *list)
__must_hold(&req->ctx->timeout_lock)
{
struct io_timeout_data *io = req->async_data;
@@ -93,23 +113,19 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
if (hrtimer_try_to_cancel(&io->timer) != -1) {
struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
- if (status)
- req_set_fail(req);
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
- list_del_init(&timeout->list);
- io_req_queue_tw_complete(req, status);
- return true;
+ list_move_tail(&timeout->list, list);
}
- return false;
}
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
- u32 seq;
struct io_timeout *timeout, *tmp;
+ LIST_HEAD(list);
+ u32 seq;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
@@ -131,10 +147,11 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
if (events_got < events_needed)
break;
- io_kill_timeout(req, 0);
+ io_kill_timeout(req, &list);
}
ctx->cq_last_tm_flush = seq;
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
+ io_flush_killed_timeouts(&list, 0);
}
static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
@@ -200,9 +217,9 @@ void io_disarm_next(struct io_kiocb *req)
} else if (req->flags & REQ_F_LINK_TIMEOUT) {
struct io_ring_ctx *ctx = req->ctx;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
link = io_disarm_linked_timeout(req);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
if (link)
io_req_queue_tw_complete(link, -ECANCELED);
}
@@ -238,11 +255,11 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
- spin_lock_irqsave(&ctx->timeout_lock, flags);
+ raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
list_del_init(&timeout->list);
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
- spin_unlock_irqrestore(&ctx->timeout_lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);
if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
req_set_fail(req);
@@ -285,9 +302,9 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
{
struct io_kiocb *req;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
req = io_timeout_extract(ctx, cd);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -330,7 +347,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
- spin_lock_irqsave(&ctx->timeout_lock, flags);
+ raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
prev = timeout->head;
timeout->head = NULL;
@@ -345,7 +362,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
}
list_del(&timeout->list);
timeout->prev = prev;
- spin_unlock_irqrestore(&ctx->timeout_lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);
req->io_task_work.func = io_req_task_link_timeout;
io_req_task_work_add(req);
@@ -472,12 +489,12 @@ int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
} else {
enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
if (tr->ltimeout)
ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
else
ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
}
if (ret < 0)
@@ -572,7 +589,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
struct list_head *entry;
u32 tail, off = timeout->off;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
/*
* sqe->off holds how many events that need to occur for this
@@ -611,7 +628,7 @@ add:
list_add(&timeout->list, entry);
data->timer.function = io_timeout_fn;
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
return IOU_ISSUE_SKIP_COMPLETE;
}
@@ -620,7 +637,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_ring_ctx *ctx = req->ctx;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
/*
* If the back reference is NULL, then our linked request finished
* before we got a chance to setup the timer
@@ -633,7 +650,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
data->mode);
list_add_tail(&timeout->list, &ctx->ltimeout_list);
}
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
/* drop submission reference */
io_put_req(req);
}
@@ -661,22 +678,22 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx
bool cancel_all)
{
struct io_timeout *timeout, *tmp;
- int canceled = 0;
+ LIST_HEAD(list);
/*
* completion_lock is needed for io_match_task(). Take it before
 * timeout_lock first to keep locking ordering.
*/
spin_lock(&ctx->completion_lock);
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
struct io_kiocb *req = cmd_to_io_kiocb(timeout);
- if (io_match_task(req, tctx, cancel_all) &&
- io_kill_timeout(req, -ECANCELED))
- canceled++;
+ if (io_match_task(req, tctx, cancel_all))
+ io_kill_timeout(req, &list);
}
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
spin_unlock(&ctx->completion_lock);
- return canceled != 0;
+
+ return io_flush_killed_timeouts(&list, -ECANCELED);
}
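The timeout rework above follows a common pattern: move the victims onto a local list while the (now raw) lock is held, and complete them only after it is dropped. A generic, hedged sketch of that pattern; demo_ctx, demo_item, should_cancel() and complete_item() are invented names, not io_uring symbols:

static void demo_cancel_matching(struct demo_ctx *ctx)
{
	struct demo_item *item, *tmp;
	LIST_HEAD(local);

	raw_spin_lock_irq(&ctx->lock);
	list_for_each_entry_safe(item, tmp, &ctx->items, node)
		if (should_cancel(item))
			list_move_tail(&item->node, &local);
	raw_spin_unlock_irq(&ctx->lock);

	/* the completion work runs without the raw lock held */
	while (!list_empty(&local)) {
		item = list_first_entry(&local, struct demo_item, node);
		list_del_init(&item->node);
		complete_item(item, -ECANCELED);
	}
}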
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f7f892a52a37..77f56674aaa9 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21281,11 +21281,15 @@ patch_map_ops_generic:
* changed in some incompatible and hard to support
* way, it's fine to back out this inlining logic
*/
+#ifdef CONFIG_SMP
insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
cnt = 3;
-
+#else
+ insn_buf[0] = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
+ cnt = 1;
+#endif
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
diff --git a/kernel/fork.c b/kernel/fork.c
index 1450b461d196..9b301180fd41 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -639,11 +639,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
LIST_HEAD(uf);
VMA_ITERATOR(vmi, mm, 0);
- uprobe_start_dup_mmap();
- if (mmap_write_lock_killable(oldmm)) {
- retval = -EINTR;
- goto fail_uprobe_end;
- }
+ if (mmap_write_lock_killable(oldmm))
+ return -EINTR;
flush_cache_dup_mm(oldmm);
uprobe_dup_mmap(oldmm, mm);
/*
@@ -782,8 +779,6 @@ out:
dup_userfaultfd_complete(&uf);
else
dup_userfaultfd_fail(&uf);
-fail_uprobe_end:
- uprobe_end_dup_mmap();
return retval;
fail_nomem_anon_vma_fork:
@@ -1692,9 +1687,11 @@ static struct mm_struct *dup_mm(struct task_struct *tsk,
if (!mm_init(mm, tsk, mm->user_ns))
goto fail_nomem;
+ uprobe_start_dup_mmap();
err = dup_mmap(mm, oldmm);
if (err)
goto free_pt;
+ uprobe_end_dup_mmap();
mm->hiwater_rss = get_mm_rss(mm);
mm->hiwater_vm = mm->total_vm;
@@ -1709,6 +1706,8 @@ free_pt:
mm->binfmt = NULL;
mm_init_owner(mm, NULL);
mmput(mm);
+ if (err)
+ uprobe_end_dup_mmap();
fail_nomem:
return NULL;
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 28a6be6e64fd..187ba1b80bda 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -166,7 +166,7 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
* Unlike in_serving_softirq(), this function returns false when called during
* a hardirq or an NMI that happened in the softirq context.
*/
-static inline bool in_softirq_really(void)
+static __always_inline bool in_softirq_really(void)
{
return in_serving_softirq() && !in_hardirq() && !in_nmi();
}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index e858de203eb6..697a56d3d949 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1292,7 +1292,13 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
*/
get_task_struct(owner);
+ preempt_disable();
raw_spin_unlock_irq(&lock->wait_lock);
+ /* wake up any tasks on the wake_q before calling rt_mutex_adjust_prio_chain */
+ wake_up_q(wake_q);
+ wake_q_init(wake_q);
+ preempt_enable();
+
res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
next_lock, waiter, task);
@@ -1596,6 +1602,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
* or TASK_UNINTERRUPTIBLE)
* @timeout: the pre-initialized and started timer, or NULL for none
* @waiter: the pre-initialized rt_mutex_waiter
+ * @wake_q: wake_q of tasks to wake when we drop the lock->wait_lock
*
* Must be called with lock->wait_lock held and interrupts disabled
*/
@@ -1603,7 +1610,8 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state,
struct hrtimer_sleeper *timeout,
- struct rt_mutex_waiter *waiter)
+ struct rt_mutex_waiter *waiter,
+ struct wake_q_head *wake_q)
__releases(&lock->wait_lock) __acquires(&lock->wait_lock)
{
struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
@@ -1634,7 +1642,13 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
owner = rt_mutex_owner(lock);
else
owner = NULL;
+ preempt_disable();
raw_spin_unlock_irq(&lock->wait_lock);
+ if (wake_q) {
+ wake_up_q(wake_q);
+ wake_q_init(wake_q);
+ }
+ preempt_enable();
if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
rt_mutex_schedule();
@@ -1708,7 +1722,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk, wake_q);
if (likely(!ret))
- ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);
+ ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter, wake_q);
if (likely(!ret)) {
/* acquired the lock */
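The wake_q handling added above is the usual deferred-wakeup idiom: wakeups queued while the wait_lock was held are flushed only after it is dropped, with preemption disabled across the unlock, and the queue is re-initialised so it can be reused. A generic sketch with demo_* placeholders:

static void demo_unlock_and_wake(raw_spinlock_t *lock, struct task_struct *task)
{
	DEFINE_WAKE_Q(wq);

	raw_spin_lock_irq(lock);
	/* ... decide under the lock that @task must be woken ... */
	wake_q_add(&wq, task);		/* defer the actual wakeup */
	preempt_disable();
	raw_spin_unlock_irq(lock);
	wake_up_q(&wq);			/* wake with the lock already released */
	wake_q_init(&wq);		/* queue is ready for reuse */
	preempt_enable();
}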
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index 33ea31d6a7b3..191e4720e546 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -383,7 +383,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
- ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
+ ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter, NULL);
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7fff1d045477..19d2699cf638 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4763,7 +4763,7 @@ static void scx_ops_bypass(bool bypass)
* sees scx_rq_bypassing() before moving tasks to SCX.
*/
if (!scx_enabled()) {
- rq_unlock_irqrestore(rq, &rf);
+ rq_unlock(rq, &rf);
continue;
}
@@ -7013,7 +7013,7 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
return -ENOENT;
INIT_LIST_HEAD(&kit->cursor.node);
- kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags;
+ kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
kit->cursor.priv = READ_ONCE(kit->dsq->seq);
return 0;
diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
index 5259cda486d0..bb7d066a7c39 100644
--- a/kernel/static_call_inline.c
+++ b/kernel/static_call_inline.c
@@ -15,7 +15,7 @@ extern struct static_call_site __start_static_call_sites[],
extern struct static_call_tramp_key __start_static_call_tramp_key[],
__stop_static_call_tramp_key[];
-static int static_call_initialized;
+int static_call_initialized;
/*
* Must be called before early_initcall() to be effective.
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 0bf78517b5d4..30e3ddc8a8a8 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -833,7 +833,7 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
#endif
{
for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
- struct fgraph_ops *gops = fgraph_array[i];
+ struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
if (gops == &fgraph_stub)
continue;
@@ -1215,7 +1215,7 @@ void fgraph_update_pid_func(void)
static int start_graph_tracing(void)
{
unsigned long **ret_stack_list;
- int ret;
+ int ret, cpu;
ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
sizeof(*ret_stack_list), GFP_KERNEL);
@@ -1223,6 +1223,12 @@ static int start_graph_tracing(void)
if (!ret_stack_list)
return -ENOMEM;
+ /* The cpu_boot init_task->ret_stack will never be freed */
+ for_each_online_cpu(cpu) {
+ if (!idle_task(cpu)->ret_stack)
+ ftrace_graph_init_idle_task(idle_task(cpu), cpu);
+ }
+
do {
ret = alloc_retstack_tasklist(ret_stack_list);
} while (ret == -EAGAIN);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9b17efb1a87d..2e113f8b13a2 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -902,16 +902,13 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
}
static struct fgraph_ops fprofiler_ops = {
- .ops = {
- .flags = FTRACE_OPS_FL_INITIALIZED,
- INIT_OPS_HASH(fprofiler_ops.ops)
- },
.entryfunc = &profile_graph_entry,
.retfunc = &profile_graph_return,
};
static int register_ftrace_profiler(void)
{
+ ftrace_ops_set_global_filter(&fprofiler_ops.ops);
return register_ftrace_graph(&fprofiler_ops);
}
@@ -922,12 +919,11 @@ static void unregister_ftrace_profiler(void)
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
.func = function_profile_call,
- .flags = FTRACE_OPS_FL_INITIALIZED,
- INIT_OPS_HASH(ftrace_profile_ops)
};
static int register_ftrace_profiler(void)
{
+ ftrace_ops_set_global_filter(&ftrace_profile_ops);
return register_ftrace_function(&ftrace_profile_ops);
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7e257e855dd1..60210fb5b211 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -7019,7 +7019,11 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
lockdep_assert_held(&cpu_buffer->mapping_lock);
nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
- nr_pages = ((nr_subbufs + 1) << subbuf_order) - pgoff; /* + meta-page */
+ nr_pages = ((nr_subbufs + 1) << subbuf_order); /* + meta-page */
+ if (nr_pages <= pgoff)
+ return -EINVAL;
+
+ nr_pages -= pgoff;
nr_vma_pages = vma_pages(vma);
if (!nr_vma_pages || nr_vma_pages > nr_pages)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index be62f0ea1814..f8aebcb01e62 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3611,17 +3611,12 @@ char *trace_iter_expand_format(struct trace_iterator *iter)
}
/* Returns true if the string is safe to dereference from an event */
-static bool trace_safe_str(struct trace_iterator *iter, const char *str,
- bool star, int len)
+static bool trace_safe_str(struct trace_iterator *iter, const char *str)
{
unsigned long addr = (unsigned long)str;
struct trace_event *trace_event;
struct trace_event_call *event;
- /* Ignore strings with no length */
- if (star && !len)
- return true;
-
/* OK if part of the event data */
if ((addr >= (unsigned long)iter->ent) &&
(addr < (unsigned long)iter->ent + iter->ent_size))
@@ -3661,181 +3656,69 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str,
return false;
}
-static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
-
-static int test_can_verify_check(const char *fmt, ...)
-{
- char buf[16];
- va_list ap;
- int ret;
-
- /*
- * The verifier is dependent on vsnprintf() modifies the va_list
- * passed to it, where it is sent as a reference. Some architectures
- * (like x86_32) passes it by value, which means that vsnprintf()
- * does not modify the va_list passed to it, and the verifier
- * would then need to be able to understand all the values that
- * vsnprintf can use. If it is passed by value, then the verifier
- * is disabled.
- */
- va_start(ap, fmt);
- vsnprintf(buf, 16, "%d", ap);
- ret = va_arg(ap, int);
- va_end(ap);
-
- return ret;
-}
-
-static void test_can_verify(void)
-{
- if (!test_can_verify_check("%d %d", 0, 1)) {
- pr_info("trace event string verifier disabled\n");
- static_branch_inc(&trace_no_verify);
- }
-}
-
/**
- * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
+ * ignore_event - Check dereferenced fields while writing to the seq buffer
* @iter: The iterator that holds the seq buffer and the event being printed
- * @fmt: The format used to print the event
- * @ap: The va_list holding the data to print from @fmt.
*
- * This writes the data into the @iter->seq buffer using the data from
- * @fmt and @ap. If the format has a %s, then the source of the string
- * is examined to make sure it is safe to print, otherwise it will
- * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
- * pointer.
+ * At boot up, test_event_printk() will flag any event that dereferences
+ * a string with "%s" that does exist in the ring buffer. It may still
+ * be valid, as the string may point to a static string in the kernel
+ * rodata that never gets freed. But if the string pointer is pointing
+ * to something that was allocated, there's a chance that it can be freed
+ * by the time the user reads the trace. This would cause a bad memory
+ * access by the kernel and possibly crash the system.
+ *
+ * This function will check if the event has any fields flagged as needing
+ * to be checked at runtime and perform those checks.
+ *
+ * If it is found that a field is unsafe, it will write into the @iter->seq
+ * a message stating what was found to be unsafe.
+ *
+ * @return: true if the event is unsafe and should be ignored,
+ * false otherwise.
*/
-void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
- va_list ap)
+bool ignore_event(struct trace_iterator *iter)
{
- long text_delta = 0;
- long data_delta = 0;
- const char *p = fmt;
- const char *str;
- bool good;
- int i, j;
+ struct ftrace_event_field *field;
+ struct trace_event *trace_event;
+ struct trace_event_call *event;
+ struct list_head *head;
+ struct trace_seq *seq;
+ const void *ptr;
- if (WARN_ON_ONCE(!fmt))
- return;
+ trace_event = ftrace_find_event(iter->ent->type);
- if (static_branch_unlikely(&trace_no_verify))
- goto print;
+ seq = &iter->seq;
- /*
- * When the kernel is booted with the tp_printk command line
- * parameter, trace events go directly through to printk().
- * It also is checked by this function, but it does not
- * have an associated trace_array (tr) for it.
- */
- if (iter->tr) {
- text_delta = iter->tr->text_delta;
- data_delta = iter->tr->data_delta;
+ if (!trace_event) {
+ trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
+ return true;
}
- /* Don't bother checking when doing a ftrace_dump() */
- if (iter->fmt == static_fmt_buf)
- goto print;
-
- while (*p) {
- bool star = false;
- int len = 0;
-
- j = 0;
-
- /*
- * We only care about %s and variants
- * as well as %p[sS] if delta is non-zero
- */
- for (i = 0; p[i]; i++) {
- if (i + 1 >= iter->fmt_size) {
- /*
- * If we can't expand the copy buffer,
- * just print it.
- */
- if (!trace_iter_expand_format(iter))
- goto print;
- }
-
- if (p[i] == '\\' && p[i+1]) {
- i++;
- continue;
- }
- if (p[i] == '%') {
- /* Need to test cases like %08.*s */
- for (j = 1; p[i+j]; j++) {
- if (isdigit(p[i+j]) ||
- p[i+j] == '.')
- continue;
- if (p[i+j] == '*') {
- star = true;
- continue;
- }
- break;
- }
- if (p[i+j] == 's')
- break;
-
- if (text_delta && p[i+1] == 'p' &&
- ((p[i+2] == 's' || p[i+2] == 'S')))
- break;
-
- star = false;
- }
- j = 0;
- }
- /* If no %s found then just print normally */
- if (!p[i])
- break;
-
- /* Copy up to the %s, and print that */
- strncpy(iter->fmt, p, i);
- iter->fmt[i] = '\0';
- trace_seq_vprintf(&iter->seq, iter->fmt, ap);
+ event = container_of(trace_event, struct trace_event_call, event);
+ if (!(event->flags & TRACE_EVENT_FL_TEST_STR))
+ return false;
- /* Add delta to %pS pointers */
- if (p[i+1] == 'p') {
- unsigned long addr;
- char fmt[4];
+ head = trace_get_fields(event);
+ if (!head) {
+ trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n",
+ trace_event_name(event));
+ return true;
+ }
- fmt[0] = '%';
- fmt[1] = 'p';
- fmt[2] = p[i+2]; /* Either %ps or %pS */
- fmt[3] = '\0';
+ /* Offsets are from the iter->ent that points to the raw event */
+ ptr = iter->ent;
- addr = va_arg(ap, unsigned long);
- addr += text_delta;
- trace_seq_printf(&iter->seq, fmt, (void *)addr);
+ list_for_each_entry(field, head, link) {
+ const char *str;
+ bool good;
- p += i + 3;
+ if (!field->needs_test)
continue;
- }
-
- /*
- * If iter->seq is full, the above call no longer guarantees
- * that ap is in sync with fmt processing, and further calls
- * to va_arg() can return wrong positional arguments.
- *
- * Ensure that ap is no longer used in this case.
- */
- if (iter->seq.full) {
- p = "";
- break;
- }
- if (star)
- len = va_arg(ap, int);
+ str = *(const char **)(ptr + field->offset);
- /* The ap now points to the string data of the %s */
- str = va_arg(ap, const char *);
-
- good = trace_safe_str(iter, str, star, len);
-
- /* Could be from the last boot */
- if (data_delta && !good) {
- str += data_delta;
- good = trace_safe_str(iter, str, star, len);
- }
+ good = trace_safe_str(iter, str);
/*
* If you hit this warning, it is likely that the
@@ -3846,44 +3729,14 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
* instead. See samples/trace_events/trace-events-sample.h
* for reference.
*/
- if (WARN_ONCE(!good, "fmt: '%s' current_buffer: '%s'",
- fmt, seq_buf_str(&iter->seq.seq))) {
- int ret;
-
- /* Try to safely read the string */
- if (star) {
- if (len + 1 > iter->fmt_size)
- len = iter->fmt_size - 1;
- if (len < 0)
- len = 0;
- ret = copy_from_kernel_nofault(iter->fmt, str, len);
- iter->fmt[len] = 0;
- star = false;
- } else {
- ret = strncpy_from_kernel_nofault(iter->fmt, str,
- iter->fmt_size);
- }
- if (ret < 0)
- trace_seq_printf(&iter->seq, "(0x%px)", str);
- else
- trace_seq_printf(&iter->seq, "(0x%px:%s)",
- str, iter->fmt);
- str = "[UNSAFE-MEMORY]";
- strcpy(iter->fmt, "%s");
- } else {
- strncpy(iter->fmt, p + i, j + 1);
- iter->fmt[j+1] = '\0';
+ if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'",
+ trace_event_name(event), field->name)) {
+ trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n",
+ trace_event_name(event), field->name);
+ return true;
}
- if (star)
- trace_seq_printf(&iter->seq, iter->fmt, len, str);
- else
- trace_seq_printf(&iter->seq, iter->fmt, str);
-
- p += i + j + 1;
}
- print:
- if (*p)
- trace_seq_vprintf(&iter->seq, p, ap);
+ return false;
}
const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
@@ -4353,6 +4206,15 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
if (event) {
if (tr->trace_flags & TRACE_ITER_FIELDS)
return print_event_fields(iter, event);
+ /*
+ * For TRACE_EVENT() events, the print_fmt is not
+ * safe to use if the array has delta offsets
+ * Force printing via the fields.
+ */
+ if ((tr->text_delta || tr->data_delta) &&
+ event->type > __TRACE_LAST_TYPE)
+ return print_event_fields(iter, event);
+
return event->funcs->trace(iter, sym_flags, event);
}
@@ -5225,6 +5087,9 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
cpumask_var_t tracing_cpumask_new;
int err;
+ if (count == 0 || count > KMALLOC_MAX_SIZE)
+ return -EINVAL;
+
if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
return -ENOMEM;
@@ -10777,8 +10642,6 @@ __init static int tracer_alloc_buffers(void)
register_snapshot_cmd();
- test_can_verify();
-
return 0;
out_free_pipe_cpumask:
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 266740b4e121..9691b47b5f3d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -667,9 +667,8 @@ void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
bool trace_is_tracepoint_string(const char *str);
const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
-void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
- va_list ap) __printf(2, 0);
char *trace_iter_expand_format(struct trace_iterator *iter);
+bool ignore_event(struct trace_iterator *iter);
int trace_empty(struct trace_iterator *iter);
@@ -1413,7 +1412,8 @@ struct ftrace_event_field {
int filter_type;
int offset;
int size;
- int is_signed;
+ unsigned int is_signed:1;
+ unsigned int needs_test:1;
int len;
};
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 77e68efbd43e..770e7ed91716 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -82,7 +82,7 @@ static int system_refcount_dec(struct event_subsystem *system)
}
static struct ftrace_event_field *
-__find_event_field(struct list_head *head, char *name)
+__find_event_field(struct list_head *head, const char *name)
{
struct ftrace_event_field *field;
@@ -114,7 +114,8 @@ trace_find_event_field(struct trace_event_call *call, char *name)
static int __trace_define_field(struct list_head *head, const char *type,
const char *name, int offset, int size,
- int is_signed, int filter_type, int len)
+ int is_signed, int filter_type, int len,
+ int need_test)
{
struct ftrace_event_field *field;
@@ -133,6 +134,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
field->offset = offset;
field->size = size;
field->is_signed = is_signed;
+ field->needs_test = need_test;
field->len = len;
list_add(&field->link, head);
@@ -151,13 +153,13 @@ int trace_define_field(struct trace_event_call *call, const char *type,
head = trace_get_fields(call);
return __trace_define_field(head, type, name, offset, size,
- is_signed, filter_type, 0);
+ is_signed, filter_type, 0, 0);
}
EXPORT_SYMBOL_GPL(trace_define_field);
static int trace_define_field_ext(struct trace_event_call *call, const char *type,
const char *name, int offset, int size, int is_signed,
- int filter_type, int len)
+ int filter_type, int len, int need_test)
{
struct list_head *head;
@@ -166,13 +168,13 @@ static int trace_define_field_ext(struct trace_event_call *call, const char *typ
head = trace_get_fields(call);
return __trace_define_field(head, type, name, offset, size,
- is_signed, filter_type, len);
+ is_signed, filter_type, len, need_test);
}
#define __generic_field(type, item, filter_type) \
ret = __trace_define_field(&ftrace_generic_fields, #type, \
#item, 0, 0, is_signed_type(type), \
- filter_type, 0); \
+ filter_type, 0, 0); \
if (ret) \
return ret;
@@ -181,7 +183,8 @@ static int trace_define_field_ext(struct trace_event_call *call, const char *typ
"common_" #item, \
offsetof(typeof(ent), item), \
sizeof(ent.item), \
- is_signed_type(type), FILTER_OTHER, 0); \
+ is_signed_type(type), FILTER_OTHER, \
+ 0, 0); \
if (ret) \
return ret;
@@ -244,19 +247,16 @@ int trace_event_get_offsets(struct trace_event_call *call)
return tail->offset + tail->size;
}
-/*
- * Check if the referenced field is an array and return true,
- * as arrays are OK to dereference.
- */
-static bool test_field(const char *fmt, struct trace_event_call *call)
+
+static struct trace_event_fields *find_event_field(const char *fmt,
+ struct trace_event_call *call)
{
struct trace_event_fields *field = call->class->fields_array;
- const char *array_descriptor;
const char *p = fmt;
int len;
if (!(len = str_has_prefix(fmt, "REC->")))
- return false;
+ return NULL;
fmt += len;
for (p = fmt; *p; p++) {
if (!isalnum(*p) && *p != '_')
@@ -265,16 +265,141 @@ static bool test_field(const char *fmt, struct trace_event_call *call)
len = p - fmt;
for (; field->type; field++) {
- if (strncmp(field->name, fmt, len) ||
- field->name[len])
+ if (strncmp(field->name, fmt, len) || field->name[len])
continue;
- array_descriptor = strchr(field->type, '[');
- /* This is an array and is OK to dereference. */
- return array_descriptor != NULL;
+
+ return field;
+ }
+ return NULL;
+}
+
+/*
+ * Check if the referenced field is an array and return true,
+ * as arrays are OK to dereference.
+ */
+static bool test_field(const char *fmt, struct trace_event_call *call)
+{
+ struct trace_event_fields *field;
+
+ field = find_event_field(fmt, call);
+ if (!field)
+ return false;
+
+ /* This is an array and is OK to dereference. */
+ return strchr(field->type, '[') != NULL;
+}
+
+/* Look for a string within an argument */
+static bool find_print_string(const char *arg, const char *str, const char *end)
+{
+ const char *r;
+
+ r = strstr(arg, str);
+ return r && r < end;
+}
+
+/* Return true if the argument pointer is safe */
+static bool process_pointer(const char *fmt, int len, struct trace_event_call *call)
+{
+ const char *r, *e, *a;
+
+ e = fmt + len;
+
+ /* Find the REC-> in the argument */
+ r = strstr(fmt, "REC->");
+ if (r && r < e) {
+ /*
+ * Addresses of events on the buffer, or an array on the buffer is
+ * OK to dereference. There's ways to fool this, but
+ * this is to catch common mistakes, not malicious code.
+ */
+ a = strchr(fmt, '&');
+ if ((a && (a < r)) || test_field(r, call))
+ return true;
+ } else if (find_print_string(fmt, "__get_dynamic_array(", e)) {
+ return true;
+ } else if (find_print_string(fmt, "__get_rel_dynamic_array(", e)) {
+ return true;
+ } else if (find_print_string(fmt, "__get_dynamic_array_len(", e)) {
+ return true;
+ } else if (find_print_string(fmt, "__get_rel_dynamic_array_len(", e)) {
+ return true;
+ } else if (find_print_string(fmt, "__get_sockaddr(", e)) {
+ return true;
+ } else if (find_print_string(fmt, "__get_rel_sockaddr(", e)) {
+ return true;
}
return false;
}
+/* Return true if the string is safe */
+static bool process_string(const char *fmt, int len, struct trace_event_call *call)
+{
+ struct trace_event_fields *field;
+ const char *r, *e, *s;
+
+ e = fmt + len;
+
+ /*
+ * There are several helper functions that return strings.
+ * If the argument contains a function, then assume its field is valid.
+ * It is considered that the argument has a function if it has:
+ * alphanumeric or '_' before a parenthesis.
+ */
+ s = fmt;
+ do {
+ r = strstr(s, "(");
+ if (!r || r >= e)
+ break;
+ for (int i = 1; r - i >= s; i++) {
+ char ch = *(r - i);
+ if (isspace(ch))
+ continue;
+ if (isalnum(ch) || ch == '_')
+ return true;
+ /* Anything else, this isn't a function */
+ break;
+ }
+		/* A function could be wrapped in parentheses, try the next one */
+ s = r + 1;
+ } while (s < e);
+
+ /*
+ * Check for arrays. If the argument has: foo[REC->val]
+ * then it is very likely that foo is an array of strings
+ * that are safe to use.
+ */
+ r = strstr(s, "[");
+ if (r && r < e) {
+ r = strstr(r, "REC->");
+ if (r && r < e)
+ return true;
+ }
+
+ /*
+	 * If there are any strings in the argument, consider this arg OK as it
+ * could be: REC->field ? "foo" : "bar" and we don't want to get into
+ * verifying that logic here.
+ */
+ if (find_print_string(fmt, "\"", e))
+ return true;
+
+ /* Dereferenced strings are also valid like any other pointer */
+ if (process_pointer(fmt, len, call))
+ return true;
+
+ /* Make sure the field is found */
+ field = find_event_field(fmt, call);
+ if (!field)
+ return false;
+
+ /* Test this field's string before printing the event */
+ call->flags |= TRACE_EVENT_FL_TEST_STR;
+ field->needs_test = 1;
+
+ return true;
+}
+
/*
* Examine the print fmt of the event looking for unsafe dereference
* pointers using %p* that could be recorded in the trace event and
@@ -284,13 +409,14 @@ static bool test_field(const char *fmt, struct trace_event_call *call)
static void test_event_printk(struct trace_event_call *call)
{
u64 dereference_flags = 0;
+ u64 string_flags = 0;
bool first = true;
- const char *fmt, *c, *r, *a;
+ const char *fmt;
int parens = 0;
char in_quote = 0;
int start_arg = 0;
int arg = 0;
- int i;
+ int i, e;
fmt = call->print_fmt;
@@ -374,8 +500,16 @@ static void test_event_printk(struct trace_event_call *call)
star = true;
continue;
}
- if ((fmt[i + j] == 's') && star)
- arg++;
+ if ((fmt[i + j] == 's')) {
+ if (star)
+ arg++;
+ if (WARN_ONCE(arg == 63,
+ "Too many args for event: %s",
+ trace_event_name(call)))
+ return;
+ dereference_flags |= 1ULL << arg;
+ string_flags |= 1ULL << arg;
+ }
break;
}
break;
@@ -403,42 +537,47 @@ static void test_event_printk(struct trace_event_call *call)
case ',':
if (in_quote || parens)
continue;
+ e = i;
i++;
while (isspace(fmt[i]))
i++;
- start_arg = i;
- if (!(dereference_flags & (1ULL << arg)))
- goto next_arg;
- /* Find the REC-> in the argument */
- c = strchr(fmt + i, ',');
- r = strstr(fmt + i, "REC->");
- if (r && (!c || r < c)) {
- /*
- * Addresses of events on the buffer,
- * or an array on the buffer is
- * OK to dereference.
- * There's ways to fool this, but
- * this is to catch common mistakes,
- * not malicious code.
- */
- a = strchr(fmt + i, '&');
- if ((a && (a < r)) || test_field(r, call))
+ /*
+ * If start_arg is zero, then this is the start of the
+ * first argument. The processing of the argument happens
+ * when the end of the argument is found, as it needs to
+	 * handle parentheses and such.
+ */
+ if (!start_arg) {
+ start_arg = i;
+ /* Balance out the i++ in the for loop */
+ i--;
+ continue;
+ }
+
+ if (dereference_flags & (1ULL << arg)) {
+ if (string_flags & (1ULL << arg)) {
+ if (process_string(fmt + start_arg, e - start_arg, call))
+ dereference_flags &= ~(1ULL << arg);
+ } else if (process_pointer(fmt + start_arg, e - start_arg, call))
dereference_flags &= ~(1ULL << arg);
- } else if ((r = strstr(fmt + i, "__get_dynamic_array(")) &&
- (!c || r < c)) {
- dereference_flags &= ~(1ULL << arg);
- } else if ((r = strstr(fmt + i, "__get_sockaddr(")) &&
- (!c || r < c)) {
- dereference_flags &= ~(1ULL << arg);
}
- next_arg:
- i--;
+ start_arg = i;
arg++;
+ /* Balance out the i++ in the for loop */
+ i--;
}
}
+ if (dereference_flags & (1ULL << arg)) {
+ if (string_flags & (1ULL << arg)) {
+ if (process_string(fmt + start_arg, i - start_arg, call))
+ dereference_flags &= ~(1ULL << arg);
+ } else if (process_pointer(fmt + start_arg, i - start_arg, call))
+ dereference_flags &= ~(1ULL << arg);
+ }
+
/*
* If you triggered the below warning, the trace event reported
* uses an unsafe dereference pointer %p*. As the data stored
@@ -2471,7 +2610,7 @@ event_define_fields(struct trace_event_call *call)
ret = trace_define_field_ext(call, field->type, field->name,
offset, field->size,
field->is_signed, field->filter_type,
- field->len);
+ field->len, field->needs_test);
if (WARN_ON_ONCE(ret)) {
pr_err("error code is %d\n", ret);
break;
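To make the new flagging concrete, here is a hedged TRACE_EVENT() sketch with invented names: a string copied into the ring buffer via __string()/__get_str() needs no runtime check, while a bare 'const char *' printed with %s is the case that now gets TRACE_EVENT_FL_TEST_STR / needs_test set and is re-verified by ignore_event() when the trace is read:

TRACE_EVENT(demo_event,
	TP_PROTO(const char *name),
	TP_ARGS(name),
	TP_STRUCT__entry(
		__string(copied_name, name)		/* stored in the buffer: safe */
		__field(const char *, pointer_name)	/* pointer only: flagged for a runtime test */
	),
	TP_fast_assign(
		__assign_str(copied_name);
		__entry->pointer_name = name;
	),
	TP_printk("copied=%s pointer=%s",
		  __get_str(copied_name), __entry->pointer_name)
);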
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 74c353164ca1..d358c9935164 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -176,7 +176,8 @@ static void function_trace_start(struct trace_array *tr)
tracing_reset_online_cpus(&tr->array_buffer);
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* fregs are guaranteed not to be NULL if HAVE_DYNAMIC_FTRACE_WITH_ARGS is set */
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 263fac44d3ca..935a886af40c 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -725,7 +725,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
static struct notifier_block trace_kprobe_module_nb = {
.notifier_call = trace_kprobe_module_callback,
- .priority = 1 /* Invoked after kprobe module callback */
+ .priority = 2 /* Invoked after kprobe and jump_label module callback */
};
static int trace_kprobe_register_module_notifier(void)
{
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index da748b7cbc4d..03d56f711ad1 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -317,10 +317,14 @@ EXPORT_SYMBOL(trace_raw_output_prep);
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
{
+ struct trace_seq *s = &iter->seq;
va_list ap;
+ if (ignore_event(iter))
+ return;
+
va_start(ap, fmt);
- trace_check_vprintf(iter, trace_event_format(iter, fmt), ap);
+ trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
va_end(ap);
}
EXPORT_SYMBOL(trace_event_printf);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8b07576814a5..f7d8fc204579 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3680,23 +3680,27 @@ void workqueue_softirq_dead(unsigned int cpu)
* check_flush_dependency - check for flush dependency sanity
* @target_wq: workqueue being flushed
* @target_work: work item being flushed (NULL for workqueue flushes)
+ * @from_cancel: are we called from the work cancel path
*
* %current is trying to flush the whole @target_wq or @target_work on it.
- * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
- * reclaiming memory or running on a workqueue which doesn't have
- * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
- * a deadlock.
+ * If this is not the cancel path (which implies work being flushed is either
+ * already running, or will not be at all), check if @target_wq doesn't have
+ * %WQ_MEM_RECLAIM and verify that %current is not reclaiming memory or running
+ * on a workqueue which doesn't have %WQ_MEM_RECLAIM as that can break forward-
+ * progress guarantee leading to a deadlock.
*/
static void check_flush_dependency(struct workqueue_struct *target_wq,
- struct work_struct *target_work)
+ struct work_struct *target_work,
+ bool from_cancel)
{
- work_func_t target_func = target_work ? target_work->func : NULL;
+ work_func_t target_func;
struct worker *worker;
- if (target_wq->flags & WQ_MEM_RECLAIM)
+ if (from_cancel || target_wq->flags & WQ_MEM_RECLAIM)
return;
worker = current_wq_worker();
+ target_func = target_work ? target_work->func : NULL;
WARN_ONCE(current->flags & PF_MEMALLOC,
"workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
@@ -3980,7 +3984,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
list_add_tail(&this_flusher.list, &wq->flusher_overflow);
}
- check_flush_dependency(wq, NULL);
+ check_flush_dependency(wq, NULL, false);
mutex_unlock(&wq->mutex);
@@ -4155,7 +4159,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
}
wq = pwq->wq;
- check_flush_dependency(wq, work);
+ check_flush_dependency(wq, work, from_cancel);
insert_wq_barrier(pwq, barr, work, worker);
raw_spin_unlock_irq(&pool->lock);
@@ -5641,6 +5645,7 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
} while (activated);
}
+__printf(1, 0)
static struct workqueue_struct *__alloc_workqueue(const char *fmt,
unsigned int flags,
int max_active, va_list args)
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 35f7560a309a..7dcebf118a3e 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -209,6 +209,13 @@ void pgalloc_tag_swap(struct folio *new, struct folio *old)
return;
}
+ /*
+ * Clear tag references to avoid debug warning when using
+ * __alloc_tag_ref_set() with non-empty reference.
+ */
+ set_codetag_empty(&ref_old);
+ set_codetag_empty(&ref_new);
+
/* swap tags */
__alloc_tag_ref_set(&ref_old, tag_new);
update_page_tag_ref(handle_old, &ref_old);
@@ -401,28 +408,52 @@ repeat:
static int vm_module_tags_populate(void)
{
- unsigned long phys_size = vm_module_tags->nr_pages << PAGE_SHIFT;
+ unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
+ (vm_module_tags->nr_pages << PAGE_SHIFT);
+ unsigned long new_end = module_tags.start_addr + module_tags.size;
- if (phys_size < module_tags.size) {
+ if (phys_end < new_end) {
struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
- unsigned long addr = module_tags.start_addr + phys_size;
+ unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
+ unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
unsigned long more_pages;
unsigned long nr;
- more_pages = ALIGN(module_tags.size - phys_size, PAGE_SIZE) >> PAGE_SHIFT;
+ more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
NUMA_NO_NODE, more_pages, next_page);
if (nr < more_pages ||
- vmap_pages_range(addr, addr + (nr << PAGE_SHIFT), PAGE_KERNEL,
+ vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
next_page, PAGE_SHIFT) < 0) {
/* Clean up and error out */
for (int i = 0; i < nr; i++)
__free_page(next_page[i]);
return -ENOMEM;
}
+
vm_module_tags->nr_pages += nr;
+
+ /*
+ * Kasan allocates 1 byte of shadow for every 8 bytes of data.
+ * When kasan_alloc_module_shadow allocates shadow memory,
+ * its unit of allocation is a page.
+ * Therefore, here we need to align to MODULE_ALIGN.
+ */
+ if (old_shadow_end < new_shadow_end)
+ kasan_alloc_module_shadow((void *)old_shadow_end,
+ new_shadow_end - old_shadow_end,
+ GFP_KERNEL);
}
+ /*
+ * Mark the pages as accessible, now that they are mapped.
+ * With hardware tag-based KASAN, marking is skipped for
+ * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
+ */
+ kasan_unpoison_vmalloc((void *)module_tags.start_addr,
+ new_end - module_tags.start_addr,
+ KASAN_VMALLOC_PROT_NORMAL);
+
return 0;
}
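
Aside (rough userspace model, not part of the patch; all addresses and sizes below are made up): the rewritten `vm_module_tags_populate()` derives the already-mapped end from `nr_pages` and only grows the KASAN shadow when the newly mapped range crosses a `MODULE_ALIGN` boundary. The arithmetic can be sanity-checked in isolation:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_SHIFT	12
#define MODULE_ALIGN	(1UL << 21)	/* illustrative: 2 MiB */

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long start_addr = 0x3fc000UL;	/* made-up, page-aligned base */
	unsigned long nr_pages = 3;		/* pages already mapped */
	unsigned long size = 5 * PAGE_SIZE;	/* new size of the region */

	unsigned long phys_end = ALIGN_DOWN(start_addr, PAGE_SIZE) +
				 (nr_pages << PAGE_SHIFT);
	unsigned long new_end = start_addr + size;

	/* Pages still to map, and the shadow range that must grow with them. */
	unsigned long more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
	unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);

	printf("more_pages=%lu, shadow grows by %lu bytes\n",
	       more_pages, new_shadow_end - old_shadow_end);
	return 0;
}
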
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index d0ae808f3a14..047397136f15 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4354,6 +4354,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
ret = 1;
}
if (ret < 0 && range_lo > min) {
+ mas_reset(mas);
ret = mas_empty_area(mas, min, range_hi, 1);
if (ret == 0)
ret = 1;
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 8b8e2933dcd4..0776452a1abb 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -868,6 +868,11 @@ static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
NUMA_NO_NODE);
if (!new_scheme)
return -ENOMEM;
+ err = damos_commit(new_scheme, src_scheme);
+ if (err) {
+ damon_destroy_scheme(new_scheme);
+ return err;
+ }
damon_add_scheme(dst, new_scheme);
}
return 0;
@@ -961,8 +966,11 @@ static int damon_commit_targets(
return -ENOMEM;
err = damon_commit_target(new_target, false,
src_target, damon_target_has_pid(src));
- if (err)
+ if (err) {
+ damon_destroy_target(new_target);
return err;
+ }
+ damon_add_target(dst, new_target);
}
return 0;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index f61cf51c2238..33b60d448fca 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -124,15 +124,6 @@
* ->private_lock (zap_pte_range->block_dirty_folio)
*/
-static void mapping_set_update(struct xa_state *xas,
- struct address_space *mapping)
-{
- if (dax_mapping(mapping) || shmem_mapping(mapping))
- return;
- xas_set_update(xas, workingset_update_node);
- xas_set_lru(xas, &shadow_nodes);
-}
-
static void page_cache_delete(struct address_space *mapping,
struct folio *folio, void *shadow)
{
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ee335d96fc39..e53d83b3e5cf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1176,11 +1176,12 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
folio_throttle_swaprate(folio, gfp);
/*
- * When a folio is not zeroed during allocation (__GFP_ZERO not used),
- * folio_zero_user() is used to make sure that the page corresponding
- * to the faulting address will be hot in the cache after zeroing.
+ * When a folio is not zeroed during allocation (__GFP_ZERO not used)
+ * or user folios require special handling, folio_zero_user() is used to
+ * make sure that the page corresponding to the faulting address will be
+ * hot in the cache after zeroing.
*/
- if (!alloc_zeroed())
+ if (user_alloc_needs_zeroing())
folio_zero_user(folio, addr);
/*
* The memory barrier inside __folio_mark_uptodate makes sure that
@@ -3576,7 +3577,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
if (folio_test_partially_mapped(folio)) {
- __folio_clear_partially_mapped(folio);
+ folio_clear_partially_mapped(folio);
mod_mthp_stat(folio_order(folio),
MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
}
@@ -3688,7 +3689,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
if (!list_empty(&folio->_deferred_list)) {
ds_queue->split_queue_len--;
if (folio_test_partially_mapped(folio)) {
- __folio_clear_partially_mapped(folio);
+ folio_clear_partially_mapped(folio);
mod_mthp_stat(folio_order(folio),
MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
}
@@ -3732,7 +3733,7 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
if (partially_mapped) {
if (!folio_test_partially_mapped(folio)) {
- __folio_set_partially_mapped(folio);
+ folio_set_partially_mapped(folio);
if (folio_test_pmd_mappable(folio))
count_vm_event(THP_DEFERRED_SPLIT_PAGE);
count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
@@ -3825,7 +3826,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
} else {
/* We lost race with folio_put() */
if (folio_test_partially_mapped(folio)) {
- __folio_clear_partially_mapped(folio);
+ folio_clear_partially_mapped(folio);
mod_mthp_stat(folio_order(folio),
MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
}
@@ -4168,7 +4169,7 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
size_t input_len = strlen(input_buf);
tok = strsep(&buf, ",");
- if (tok) {
+ if (tok && buf) {
strscpy(file_path, tok);
} else {
ret = -EINVAL;
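
Aside (illustrative, not part of the patch): the `tok && buf` check relies on `strsep()` setting the string pointer to NULL when no delimiter is found, which is how a missing second field is detected. A small userspace demonstration:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char one_field[] = "file_path_only";	/* no ',' present */
	char two_fields[] = "file_path,0x1000";
	char *buf, *tok;

	buf = one_field;
	tok = strsep(&buf, ",");
	/* tok is non-NULL but buf is NULL: there was no delimiter, so the
	 * offset part is missing and the input must be rejected. */
	printf("tok=%s buf=%s\n", tok, buf ? buf : "(null)");

	buf = two_fields;
	tok = strsep(&buf, ",");
	printf("tok=%s buf=%s\n", tok, buf ? buf : "(null)");
	return 0;
}
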
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea2ed8e301ef..c498874a7170 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5340,7 +5340,7 @@ again:
break;
}
ret = copy_user_large_folio(new_folio, pte_folio,
- ALIGN_DOWN(addr, sz), dst_vma);
+ addr, dst_vma);
folio_put(pte_folio);
if (ret) {
folio_put(new_folio);
@@ -6643,8 +6643,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
*foliop = NULL;
goto out;
}
- ret = copy_user_large_folio(folio, *foliop,
- ALIGN_DOWN(dst_addr, size), dst_vma);
+ ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
folio_put(*foliop);
*foliop = NULL;
if (ret) {
@@ -7212,7 +7211,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
spte = hugetlb_walk(svma, saddr,
vma_mmu_pagesize(svma));
if (spte) {
- get_page(virt_to_page(spte));
+ ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
break;
}
}
@@ -7227,7 +7226,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
(pmd_t *)((unsigned long)spte & PAGE_MASK));
mm_inc_nr_pmds(mm);
} else {
- put_page(virt_to_page(spte));
+ ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
}
spin_unlock(&mm->page_table_lock);
out:
@@ -7239,10 +7238,6 @@ out:
/*
* unmap huge page backed by shared pte.
*
- * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
- * indicated by page_count > 1, unmap is achieved by clearing pud and
- * decrementing the ref count. If count == 1, the pte page is not shared.
- *
* Called with page table lock held.
*
* returns: 1 successfully unmapped a shared pte page
@@ -7251,18 +7246,20 @@ out:
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+ unsigned long sz = huge_page_size(hstate_vma(vma));
pgd_t *pgd = pgd_offset(mm, addr);
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
hugetlb_vma_assert_locked(vma);
- BUG_ON(page_count(virt_to_page(ptep)) == 0);
- if (page_count(virt_to_page(ptep)) == 1)
+ if (sz != PMD_SIZE)
+ return 0;
+ if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep)))
return 0;
pud_clear(pud);
- put_page(virt_to_page(ptep));
+ ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
mm_dec_nr_pmds(mm);
return 1;
}
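
Aside (simplified model, not part of the patch; `struct ptdesc_model` and the helper names are made up): the hugetlb change replaces page-refcount juggling with a dedicated PMD share counter, so unsharing only touches a counter that is zero for an unshared table:

#include <stdio.h>

struct ptdesc_model { int pt_share_count; };

static void pts_init(struct ptdesc_model *p) { p->pt_share_count = 0; }
static void pts_inc(struct ptdesc_model *p) { p->pt_share_count++; }
static void pts_dec(struct ptdesc_model *p) { p->pt_share_count--; }
static int pts_count(const struct ptdesc_model *p) { return p->pt_share_count; }

int main(void)
{
	struct ptdesc_model pmd_table;

	pts_init(&pmd_table);
	pts_inc(&pmd_table);		/* a second VMA shares the PMD table */
	if (!pts_count(&pmd_table)) {
		puts("not shared: nothing to unshare");
	} else {
		pts_dec(&pmd_table);	/* huge_pmd_unshare() drops one sharer */
		puts("unshared one mapping");
	}
	return 0;
}
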
diff --git a/mm/internal.h b/mm/internal.h
index cb8d8e8e3ffa..9826f7dce607 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1285,12 +1285,6 @@ void touch_pud(struct vm_area_struct *vma, unsigned long addr,
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, bool write);
-static inline bool alloc_zeroed(void)
-{
- return static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
- &init_on_alloc);
-}
-
/*
* Parses a string with mem suffixes into its order. Useful to parse kernel
* parameters.
@@ -1510,6 +1504,12 @@ static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
+#define mapping_set_update(xas, mapping) do { \
+ if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \
+ xas_set_update(xas, workingset_update_node); \
+ xas_set_lru(xas, &shadow_nodes); \
+ } \
+} while (0)
/* mremap.c */
unsigned long move_page_tables(struct vm_area_struct *vma,
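
Aside (illustrative sketch, not part of the patch; the stub types stand in for the real `xa_state`/`address_space`): moving `mapping_set_update()` into a header as a `do { ... } while (0)` macro keeps it usable as a single statement from multiple callers, including an unbraced `if`:

#include <stdbool.h>
#include <stdio.h>

struct xa_state { void (*update)(void); };
struct address_space { bool dax; bool shmem; };

static void workingset_update_node_stub(void) { }

#define mapping_set_update(xas, mapping) do {			\
	if (!(mapping)->dax && !(mapping)->shmem)		\
		(xas)->update = workingset_update_node_stub;	\
} while (0)

int main(void)
{
	struct xa_state xas = { 0 };
	struct address_space mapping = { .dax = false, .shmem = false };

	/* do { ... } while (0) makes the macro one statement, so it also
	 * behaves correctly in an unbraced if/else. */
	if (!mapping.dax)
		mapping_set_update(&xas, &mapping);
	else
		puts("skipped");

	printf("update set: %s\n", xas.update ? "yes" : "no");
	return 0;
}
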
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6f8d46d107b4..653dbb1ff05c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -19,6 +19,7 @@
#include <linux/rcupdate_wait.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
+#include <linux/dax.h>
#include <linux/ksm.h>
#include <asm/tlb.h>
@@ -1837,6 +1838,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
if (result != SCAN_SUCCEED)
goto out;
+ mapping_set_update(&xas, mapping);
+
__folio_set_locked(new_folio);
if (is_shmem)
__folio_set_swapbacked(new_folio);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 2a945c07ae99..737af23f4f4e 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -373,7 +373,7 @@ static void print_unreferenced(struct seq_file *seq,
for (i = 0; i < nr_entries; i++) {
void *ptr = (void *)entries[i];
- warn_or_seq_printf(seq, " [<%pK>] %pS\n", ptr, ptr);
+ warn_or_seq_printf(seq, " %pS\n", ptr);
}
}
diff --git a/mm/list_lru.c b/mm/list_lru.c
index f93ada6a207b..7d69434c70e0 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -77,7 +77,6 @@ again:
spin_lock(&l->lock);
nr_items = READ_ONCE(l->nr_items);
if (likely(nr_items != LONG_MIN)) {
- WARN_ON(nr_items < 0);
rcu_read_unlock();
return l;
}
@@ -450,6 +449,7 @@ static void memcg_reparent_list_lru_one(struct list_lru *lru, int nid,
list_splice_init(&src->list, &dst->list);
if (src->nr_items) {
+ WARN_ON(src->nr_items < 0);
dst->nr_items += src->nr_items;
set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
}
diff --git a/mm/memfd.c b/mm/memfd.c
index c17c3ea701a1..35a370d75c9a 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -170,7 +170,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
return error;
}
-static unsigned int *memfd_file_seals_ptr(struct file *file)
+unsigned int *memfd_file_seals_ptr(struct file *file)
{
if (shmem_file(file))
return &SHMEM_I(file_inode(file))->seals;
diff --git a/mm/memory.c b/mm/memory.c
index 75c2dfd04f72..398c031be9ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4733,12 +4733,12 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
folio_throttle_swaprate(folio, gfp);
/*
* When a folio is not zeroed during allocation
- * (__GFP_ZERO not used), folio_zero_user() is used
- * to make sure that the page corresponding to the
- * faulting address will be hot in the cache after
- * zeroing.
+ * (__GFP_ZERO not used) or user folios require special
+ * handling, folio_zero_user() is used to make sure
+ * that the page corresponding to the faulting address
+ * will be hot in the cache after zeroing.
*/
- if (!alloc_zeroed())
+ if (user_alloc_needs_zeroing())
folio_zero_user(folio, vmf->address);
return folio;
}
@@ -6815,9 +6815,10 @@ static inline int process_huge_page(
return 0;
}
-static void clear_gigantic_page(struct folio *folio, unsigned long addr,
+static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint,
unsigned int nr_pages)
{
+ unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio));
int i;
might_sleep();
@@ -6851,13 +6852,14 @@ void folio_zero_user(struct folio *folio, unsigned long addr_hint)
}
static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
- unsigned long addr,
+ unsigned long addr_hint,
struct vm_area_struct *vma,
unsigned int nr_pages)
{
- int i;
+ unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
struct page *dst_page;
struct page *src_page;
+ int i;
for (i = 0; i < nr_pages; i++) {
dst_page = folio_page(dst, i);
diff --git a/mm/mmap.c b/mm/mmap.c
index d32b7e701058..aec208f90337 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -47,6 +47,7 @@
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>
+#include <linux/memfd.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -368,6 +369,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
if (file) {
struct inode *inode = file_inode(file);
+ unsigned int seals = memfd_file_seals(file);
unsigned long flags_mask;
if (!file_mmap_ok(file, inode, pgoff, len))
@@ -408,6 +410,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
vm_flags |= VM_SHARED | VM_MAYSHARE;
if (!(file->f_mode & FMODE_WRITE))
vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
+ else if (is_readonly_sealed(seals, vm_flags))
+ vm_flags &= ~VM_MAYWRITE;
fallthrough;
case MAP_PRIVATE:
if (!(file->f_mode & FMODE_READ))
@@ -888,7 +892,7 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
if (get_area) {
addr = get_area(file, addr, len, pgoff, flags);
- } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
+ } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file
&& !addr /* no hint */
&& IS_ALIGNED(len, PMD_SIZE)) {
/* Ensures that larger anonymous mappings are THP aligned. */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1cb4b8c8886d..cae7b93864c2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1238,13 +1238,15 @@ static void split_large_buddy(struct zone *zone, struct page *page,
if (order > pageblock_order)
order = pageblock_order;
- while (pfn != end) {
+ do {
int mt = get_pfnblock_migratetype(page, pfn);
__free_one_page(page, pfn, zone, order, mt, fpi);
pfn += 1 << order;
+ if (pfn == end)
+ break;
page = pfn_to_page(pfn);
- }
+ } while (1);
}
static void free_one_page(struct zone *zone, struct page *page,
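
Aside (minimal userspace model, not part of the patch): converting `split_large_buddy()` to a `do { } while` with an early `break` ensures `pfn_to_page()` is never evaluated for `pfn == end`, which may sit one past the last valid page. The loop shape, with a print standing in for `__free_one_page()`:

#include <stdio.h>

static void free_range(unsigned long pfn, unsigned long end, unsigned int order)
{
	do {
		printf("free block at pfn %lu\n", pfn);	/* __free_one_page() */
		pfn += 1UL << order;
		if (pfn == end)
			break;		/* stop before touching pfn_to_page(end) */
		/* page = pfn_to_page(pfn); only reached for valid pfns */
	} while (1);
}

int main(void)
{
	free_range(0, 8, 1);	/* frees blocks at pfns 0, 2, 4, 6 */
	return 0;
}
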
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 5297dcc38c37..5a882f2b10f9 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -279,7 +279,7 @@ static unsigned long pmdp_get_lockless_start(void) { return 0; }
static void pmdp_get_lockless_end(unsigned long irqflags) { }
#endif
-pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
+pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
unsigned long irqflags;
pmd_t pmdval;
diff --git a/mm/readahead.c b/mm/readahead.c
index ea650b8b02fb..e151f4b13ca4 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -646,7 +646,11 @@ void page_cache_async_ra(struct readahead_control *ractl,
1UL << order);
if (index == expected) {
ra->start += ra->size;
- ra->size = get_next_ra_size(ra, max_pages);
+ /*
+ * In the case of MADV_HUGEPAGE, the actual size might exceed
+ * the readahead window.
+ */
+ ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
ra->async_size = ra->size;
goto readit;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index ccb9629a0f70..ac58d4fb2e6f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -787,6 +787,14 @@ static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static void shmem_update_stats(struct folio *folio, int nr_pages)
+{
+ if (folio_test_pmd_mappable(folio))
+ __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
+ __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+ __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
+}
+
/*
* Somewhat like filemap_add_folio, but error if expected item has gone.
*/
@@ -821,10 +829,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
xas_store(&xas, folio);
if (xas_error(&xas))
goto unlock;
- if (folio_test_pmd_mappable(folio))
- __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
- __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
- __lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
+ shmem_update_stats(folio, nr);
mapping->nrpages += nr;
unlock:
xas_unlock_irq(&xas);
@@ -852,8 +857,7 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
error = shmem_replace_entry(mapping, folio->index, folio, radswap);
folio->mapping = NULL;
mapping->nrpages -= nr;
- __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
- __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+ shmem_update_stats(folio, -nr);
xa_unlock_irq(&mapping->i_pages);
folio_put_refs(folio, nr);
BUG_ON(error);
@@ -1531,7 +1535,7 @@ try_split:
!shmem_falloc->waitq &&
index >= shmem_falloc->start &&
index < shmem_falloc->next)
- shmem_falloc->nr_unswapped++;
+ shmem_falloc->nr_unswapped += nr_pages;
else
shmem_falloc = NULL;
spin_unlock(&inode->i_lock);
@@ -1685,6 +1689,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
unsigned long mask = READ_ONCE(huge_shmem_orders_always);
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
unsigned long vm_flags = vma ? vma->vm_flags : 0;
+ pgoff_t aligned_index;
bool global_huge;
loff_t i_size;
int order;
@@ -1719,9 +1724,9 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
/* Allow mTHP that will be fully within i_size. */
order = highest_order(within_size_orders);
while (within_size_orders) {
- index = round_up(index + 1, order);
+ aligned_index = round_up(index + 1, 1 << order);
i_size = round_up(i_size_read(inode), PAGE_SIZE);
- if (i_size >> PAGE_SHIFT >= index) {
+ if (i_size >> PAGE_SHIFT >= aligned_index) {
mask |= within_size_orders;
break;
}
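
Aside (illustrative, not part of the patch): the shmem fix passes the mTHP size (`1 << order`) rather than the raw order to `round_up()`, which changes the boundary the index is rounded to. Using the usual power-of-two `round_up()` definition:

#include <stdio.h>

#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long index = 5;
	int order = 4;		/* a 16-page mTHP */

	/* Passing the order value rounds to a multiple of 4 here (and to
	 * garbage for non-power-of-two orders), not to the mTHP boundary. */
	printf("wrong: %lu\n", round_up(index + 1, order));		/* 8 */
	printf("right: %lu\n", round_up(index + 1, 1UL << order));	/* 16 */
	return 0;
}
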
@@ -1969,10 +1974,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
}
if (!error) {
mem_cgroup_replace_folio(old, new);
- __lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
- __lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
- __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
- __lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
+ shmem_update_stats(new, nr_pages);
+ shmem_update_stats(old, -nr_pages);
}
xa_unlock_irq(&swap_mapping->i_pages);
diff --git a/mm/util.c b/mm/util.c
index c1c3b06ab4f9..60aa40f612b8 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -297,12 +297,7 @@ void *memdup_user_nul(const void __user *src, size_t len)
{
char *p;
- /*
- * Always use GFP_KERNEL, since copy_from_user() can sleep and
- * cause pagefault, which makes it pointless to use GFP_NOFS
- * or GFP_ATOMIC.
- */
- p = kmalloc_track_caller(len + 1, GFP_KERNEL);
+ p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
diff --git a/mm/vma.c b/mm/vma.c
index 8e31b7e25aeb..bb2119e5a0d0 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -2460,10 +2460,13 @@ unsigned long __mmap_region(struct file *file, unsigned long addr,
/* If flags changed, we might be able to merge, so try again. */
if (map.retry_merge) {
+ struct vm_area_struct *merged;
VMG_MMAP_STATE(vmg, &map, vma);
vma_iter_config(map.vmi, map.addr, map.end);
- vma_merge_existing_range(&vmg);
+ merged = vma_merge_existing_range(&vmg);
+ if (merged)
+ vma = merged;
}
__mmap_complete(&map, vma);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f009b21705c1..5c88d0e90c20 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3374,7 +3374,8 @@ void vfree(const void *addr)
struct page *page = vm->pages[i];
BUG_ON(!page);
- mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
+ if (!(vm->flags & VM_MAP_PUT_PAGES))
+ mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
/*
* High-order allocs for huge vmallocs are split, so
* can be freed as an array of order-0 allocations
@@ -3382,7 +3383,8 @@ void vfree(const void *addr)
__free_page(page);
cond_resched();
}
- atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
+ if (!(vm->flags & VM_MAP_PUT_PAGES))
+ atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
kvfree(vm->pages);
kfree(vm);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 76378bc257e3..9a859b7d18d7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -374,7 +374,14 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
-
+ /*
+ * If there are no reclaimable file-backed or anonymous pages,
+ * ensure zones with sufficient free pages are not skipped.
+ * This prevents zones like DMA32 from being ignored in reclaim
+ * scenarios where they can still help alleviate memory pressure.
+ */
+ if (nr == 0)
+ nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
return nr;
}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4d016314a56c..0889b75cef14 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -2148,13 +2148,14 @@ static int vmstat_cpu_online(unsigned int cpu)
if (!node_state(cpu_to_node(cpu), N_CPU)) {
node_set_state(cpu_to_node(cpu), N_CPU);
}
+ enable_delayed_work(&per_cpu(vmstat_work, cpu));
return 0;
}
static int vmstat_cpu_down_prep(unsigned int cpu)
{
- cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
+ disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
return 0;
}
diff --git a/mm/zswap.c b/mm/zswap.c
index f6316b66fb23..5a27af8d86ea 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -880,6 +880,18 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
return 0;
}
+/* Prevent CPU hotplug from freeing up the per-CPU acomp_ctx resources */
+static struct crypto_acomp_ctx *acomp_ctx_get_cpu(struct crypto_acomp_ctx __percpu *acomp_ctx)
+{
+ cpus_read_lock();
+ return raw_cpu_ptr(acomp_ctx);
+}
+
+static void acomp_ctx_put_cpu(void)
+{
+ cpus_read_unlock();
+}
+
static bool zswap_compress(struct page *page, struct zswap_entry *entry,
struct zswap_pool *pool)
{
@@ -893,8 +905,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
gfp_t gfp;
u8 *dst;
- acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
-
+ acomp_ctx = acomp_ctx_get_cpu(pool->acomp_ctx);
mutex_lock(&acomp_ctx->mutex);
dst = acomp_ctx->buffer;
@@ -950,6 +961,7 @@ unlock:
zswap_reject_alloc_fail++;
mutex_unlock(&acomp_ctx->mutex);
+ acomp_ctx_put_cpu();
return comp_ret == 0 && alloc_ret == 0;
}
@@ -960,7 +972,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
struct crypto_acomp_ctx *acomp_ctx;
u8 *src;
- acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+ acomp_ctx = acomp_ctx_get_cpu(entry->pool->acomp_ctx);
mutex_lock(&acomp_ctx->mutex);
src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
@@ -990,6 +1002,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
if (src != acomp_ctx->buffer)
zpool_unmap_handle(zpool, entry->handle);
+ acomp_ctx_put_cpu();
}
/*********************************
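
Aside (userspace analogy, not part of the patch; the rwlock stands in for `cpus_read_lock()`): the new `acomp_ctx_get_cpu()`/`acomp_ctx_put_cpu()` pair brackets use of the per-CPU context with a read-side lock so the CPU-hotplug teardown path cannot free it mid-use. Build with `-pthread`:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
static int per_cpu_ctx[4];

static int *ctx_get_cpu(int cpu)
{
	pthread_rwlock_rdlock(&hotplug_lock);	/* cpus_read_lock() */
	return &per_cpu_ctx[cpu];
}

static void ctx_put_cpu(void)
{
	pthread_rwlock_unlock(&hotplug_lock);	/* cpus_read_unlock() */
}

int main(void)
{
	int *ctx = ctx_get_cpu(0);

	*ctx = 42;	/* use the context; "hotplug" teardown must wait */
	ctx_put_cpu();
	printf("%d\n", per_cpu_ctx[0]);
	return 0;
}
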
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 9b1168eb77ab..b24afec24138 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1173,6 +1173,8 @@ EXPORT_SYMBOL(ceph_osdc_new_request);
int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
{
+ WARN_ON(op->op != CEPH_OSD_OP_SPARSE_READ);
+
op->extent.sparse_ext_cnt = cnt;
op->extent.sparse_ext = kmalloc_array(cnt,
sizeof(*op->extent.sparse_ext),
diff --git a/net/core/dev.c b/net/core/dev.c
index 45a8c3dd4a64..faa23042df38 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3642,8 +3642,10 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
- skb_network_header_len(skb) != sizeof(struct ipv6hdr))
+ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
+ !ipv6_has_hopopt_jumbo(skb))
goto sw_checksum;
+
switch (skb->csum_offset) {
case offsetof(struct tcphdr, check):
case offsetof(struct udphdr, check):
diff --git a/net/core/filter.c b/net/core/filter.c
index 21131ec25f24..834614071727 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3734,13 +3734,22 @@ static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
- u32 min_len = skb_network_offset(skb);
+ int offset = skb_network_offset(skb);
+ u32 min_len = 0;
- if (skb_transport_header_was_set(skb))
- min_len = skb_transport_offset(skb);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- min_len = skb_checksum_start_offset(skb) +
- skb->csum_offset + sizeof(__sum16);
+ if (offset > 0)
+ min_len = offset;
+ if (skb_transport_header_was_set(skb)) {
+ offset = skb_transport_offset(skb);
+ if (offset > 0)
+ min_len = offset;
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ offset = skb_checksum_start_offset(skb) +
+ skb->csum_offset + sizeof(__sum16);
+ if (offset > 0)
+ min_len = offset;
+ }
return min_len;
}
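
Aside (simplified model, not part of the patch): the rewritten `__bpf_skb_min_len()` only lets a candidate offset raise the minimum when it is positive; previously a negative offset (for example an unset header) stored into a `u32` became a huge minimum length. A standalone version of that guard:

#include <stdio.h>

typedef unsigned int u32;

static u32 min_len_from_offsets(int net_off, int transport_off, int csum_end)
{
	u32 min_len = 0;

	if (net_off > 0)
		min_len = net_off;
	if (transport_off > 0)
		min_len = transport_off;
	if (csum_end > 0)
		min_len = csum_end;
	return min_len;
}

int main(void)
{
	/* With the old code, a negative offset assigned to a u32 wrapped
	 * around to a huge value; here it is simply ignored. */
	printf("min_len = %u\n", min_len_from_offsets(-2, 0, 0));
	return 0;
}
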
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 9527dd46e4dc..b0772d135efb 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -246,8 +246,12 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
rcu_read_unlock();
rtnl_unlock();
- if (err)
+ if (err) {
+ goto err_free_msg;
+ } else if (!rsp->len) {
+ err = -ENOENT;
goto err_free_msg;
+ }
return genlmsg_reply(rsp, info);
@@ -430,10 +434,10 @@ static int
netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx,
u32 q_type, const struct genl_info *info)
{
- int err = 0;
+ int err;
if (!(netdev->flags & IFF_UP))
- return err;
+ return -ENOENT;
err = netdev_nl_queue_validate(netdev, q_idx, q_type);
if (err)
@@ -488,24 +492,21 @@ netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp,
struct netdev_nl_dump_ctx *ctx)
{
int err = 0;
- int i;
if (!(netdev->flags & IFF_UP))
return err;
- for (i = ctx->rxq_idx; i < netdev->real_num_rx_queues;) {
- err = netdev_nl_queue_fill_one(rsp, netdev, i,
+ for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) {
+ err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx,
NETDEV_QUEUE_TYPE_RX, info);
if (err)
return err;
- ctx->rxq_idx = i++;
}
- for (i = ctx->txq_idx; i < netdev->real_num_tx_queues;) {
- err = netdev_nl_queue_fill_one(rsp, netdev, i,
+ for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) {
+ err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx,
NETDEV_QUEUE_TYPE_TX, info);
if (err)
return err;
- ctx->txq_idx = i++;
}
return err;
@@ -671,7 +672,7 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
i, info);
if (err)
return err;
- ctx->rxq_idx = i++;
+ ctx->rxq_idx = ++i;
}
i = ctx->txq_idx;
while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) {
@@ -679,7 +680,7 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
i, info);
if (err)
return err;
- ctx->txq_idx = i++;
+ ctx->txq_idx = ++i;
}
ctx->rxq_idx = 0;
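
Aside (illustrative, not part of the patch): both netdev-genl fixes concern what the resume index records. Storing the post-increment value (`ctx_idx = ++i`, or advancing the context index in the `for` header) makes the next dump start at the first unvisited queue instead of repeating the last one:

#include <stdio.h>

int main(void)
{
	int i, ctx_idx = 0, real_num = 3;

	for (i = ctx_idx; i < real_num;) {
		printf("dump queue %d\n", i);
		ctx_idx = ++i;	/* the old "ctx_idx = i++" stored the index just dumped */
	}
	printf("resume from %d\n", ctx_idx);
	return 0;
}
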
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ebcfc2debf1a..d9f959c619d9 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3819,6 +3819,7 @@ out_unregister:
}
static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops,
+ struct nlattr *tbp[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
@@ -3826,7 +3827,7 @@ static struct net *rtnl_get_peer_net(const struct rtnl_link_ops *ops,
int err;
if (!data || !data[ops->peer_type])
- return NULL;
+ return rtnl_link_get_net_ifla(tbp);
err = rtnl_nla_parse_ifinfomsg(tb, data[ops->peer_type], extack);
if (err < 0)
@@ -3971,7 +3972,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
}
if (ops->peer_type) {
- peer_net = rtnl_get_peer_net(ops, data, extack);
+ peer_net = rtnl_get_peer_net(ops, tb, data, extack);
if (IS_ERR(peer_net)) {
ret = PTR_ERR(peer_net);
goto put_ops;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index e90fbab703b2..61f3f3d4e528 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -369,8 +369,8 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes)
{
int ret = -ENOSPC, i = msg->sg.curr;
+ u32 copy, buf_size, copied = 0;
struct scatterlist *sge;
- u32 copy, buf_size;
void *to;
do {
@@ -397,6 +397,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
goto out;
}
bytes -= copy;
+ copied += copy;
if (!bytes)
break;
msg->sg.copybreak = 0;
@@ -404,7 +405,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
} while (i != msg->sg.end);
out:
msg->sg.curr = i;
- return ret;
+ return (ret < 0) ? ret : copied;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
@@ -445,8 +446,10 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
if (likely(!peek)) {
sge->offset += copy;
sge->length -= copy;
- if (!msg_rx->skb)
+ if (!msg_rx->skb) {
sk_mem_uncharge(sk, copy);
+ atomic_sub(copy, &sk->sk_rmem_alloc);
+ }
msg_rx->sg.size -= copy;
if (!sge->length) {
@@ -772,6 +775,8 @@ static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
list_del(&msg->list);
+ if (!msg->skb)
+ atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
sk_msg_free(psock->sk, msg);
kfree(msg);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 74729d20cd00..be84885f9290 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1295,7 +1295,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
break;
case SO_REUSEPORT:
- sk->sk_reuseport = valbool;
+ if (valbool && !sk_is_inet(sk))
+ ret = -EOPNOTSUPP;
+ else
+ sk->sk_reuseport = valbool;
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
diff --git a/net/dsa/tag.h b/net/dsa/tag.h
index d5707870906b..5d80ddad4ff6 100644
--- a/net/dsa/tag.h
+++ b/net/dsa/tag.h
@@ -138,9 +138,10 @@ static inline void dsa_software_untag_vlan_unaware_bridge(struct sk_buff *skb,
* dsa_software_vlan_untag: Software VLAN untagging in DSA receive path
* @skb: Pointer to socket buffer (packet)
*
- * Receive path method for switches which cannot avoid tagging all packets
- * towards the CPU port. Called when ds->untag_bridge_pvid (legacy) or
- * ds->untag_vlan_aware_bridge_pvid is set to true.
+ * Receive path method for switches which send some packets as VLAN-tagged
+ * towards the CPU port (generally from VLAN-aware bridge ports) even when the
+ * packet was not tagged on the wire. Called when ds->untag_bridge_pvid
+ * (legacy) or ds->untag_vlan_aware_bridge_pvid is set to true.
*
* As a side effect of this method, any VLAN tag from the skb head is moved
* to hwaccel.
@@ -149,14 +150,19 @@ static inline struct sk_buff *dsa_software_vlan_untag(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_user_to_port(skb->dev);
struct net_device *br = dsa_port_bridge_dev_get(dp);
- u16 vid;
+ u16 vid, proto;
+ int err;
/* software untagging for standalone ports not yet necessary */
if (!br)
return skb;
+ err = br_vlan_get_proto(br, &proto);
+ if (err)
+ return skb;
+
/* Move VLAN tag from data to hwaccel */
- if (!skb_vlan_tag_present(skb)) {
+ if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
skb = skb_vlan_untag(skb);
if (!skb)
return NULL;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 25505f9b724c..09b73acf037a 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -294,7 +294,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
iph->saddr, tunnel->parms.o_key,
- iph->tos & INET_DSCP_MASK, dev_net(dev),
+ iph->tos & INET_DSCP_MASK, tunnel->net,
tunnel->parms.link, tunnel->fwmark, 0, 0);
rt = ip_route_output_key(tunnel->net, &fl4);
@@ -611,7 +611,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
tunnel_id_to_key32(key->tun_id),
- tos & INET_DSCP_MASK, dev_net(dev), 0, skb->mark,
+ tos & INET_DSCP_MASK, tunnel->net, 0, skb->mark,
skb_get_hash(skb), key->flow_flags);
if (!tunnel_hlen)
@@ -774,7 +774,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, tos & INET_DSCP_MASK,
- dev_net(dev), READ_ONCE(tunnel->parms.link),
+ tunnel->net, READ_ONCE(tunnel->parms.link),
tunnel->fwmark, skb_get_hash(skb), 0);
if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 99cef92e6290..47f65b1b70ca 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -49,13 +49,14 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
sge = sk_msg_elem(msg, i);
size = (apply && apply_bytes < sge->length) ?
apply_bytes : sge->length;
- if (!sk_wmem_schedule(sk, size)) {
+ if (!__sk_rmem_schedule(sk, size, false)) {
if (!copied)
ret = -ENOMEM;
break;
}
sk_mem_charge(sk, size);
+ atomic_add(size, &sk->sk_rmem_alloc);
sk_msg_xfer(tmp, msg, i, size);
copied += size;
if (sge->length)
@@ -74,7 +75,8 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
if (!ret) {
msg->sg.start = i;
- sk_psock_queue_msg(psock, tmp);
+ if (!sk_psock_queue_msg(psock, tmp))
+ atomic_sub(copied, &sk->sk_rmem_alloc);
sk_psock_data_ready(sk, psock);
} else {
sk_msg_free(sk, tmp);
@@ -493,7 +495,7 @@ more_data:
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct sk_msg tmp, *msg_tx = NULL;
- int copied = 0, err = 0;
+ int copied = 0, err = 0, ret = 0;
struct sk_psock *psock;
long timeo;
int flags;
@@ -536,14 +538,14 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
copy = msg_tx->sg.size - osize;
}
- err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
+ ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
copy);
- if (err < 0) {
+ if (ret < 0) {
sk_msg_trim(sk, msg_tx, osize);
goto out_err;
}
- copied += copy;
+ copied += ret;
if (psock->cork_bytes) {
if (size > psock->cork_bytes)
psock->cork_bytes = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5bdf13ac26ef..4811727b8a02 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -7328,6 +7328,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
req->timeout))) {
reqsk_free(req);
+ dst_release(dst);
return 0;
}
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 7646e401c630..1d41b2ab4884 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -195,6 +195,8 @@ static const struct nf_hook_ops ila_nf_hook_ops[] = {
},
};
+static DEFINE_MUTEX(ila_mutex);
+
static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
{
struct ila_net *ilan = net_generic(net, ila_net_id);
@@ -202,16 +204,20 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
int err = 0, order;
- if (!ilan->xlat.hooks_registered) {
+ if (!READ_ONCE(ilan->xlat.hooks_registered)) {
/* We defer registering net hooks in the namespace until the
* first mapping is added.
*/
- err = nf_register_net_hooks(net, ila_nf_hook_ops,
- ARRAY_SIZE(ila_nf_hook_ops));
+ mutex_lock(&ila_mutex);
+ if (!ilan->xlat.hooks_registered) {
+ err = nf_register_net_hooks(net, ila_nf_hook_ops,
+ ARRAY_SIZE(ila_nf_hook_ops));
+ if (!err)
+ WRITE_ONCE(ilan->xlat.hooks_registered, true);
+ }
+ mutex_unlock(&ila_mutex);
if (err)
return err;
-
- ilan->xlat.hooks_registered = true;
}
ila = kzalloc(sizeof(*ila), GFP_KERNEL);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 51bccfb00a9c..61b0159b2fbe 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -124,8 +124,8 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
if (unlikely(!pskb_may_pull(skb, llc_len)))
return 0;
- skb->transport_header += llc_len;
skb_pull(skb, llc_len);
+ skb_reset_transport_header(skb);
if (skb->protocol == htons(ETH_P_802_2)) {
__be16 pdulen;
s32 data_size;
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 597e9cf5aa64..3f2bd65ff5e3 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -374,8 +374,13 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
msk = NULL;
rc = -EINVAL;
- /* we may be receiving a locally-routed packet; drop source sk
- * accounting
+ /* We may be receiving a locally-routed packet; drop source sk
+ * accounting.
+ *
+ * From here, we will either queue the skb - either to a frag_queue, or
+ * to a receiving socket. When that succeeds, we clear the skb pointer;
+ * a non-NULL skb on exit will be otherwise unowned, and hence
+ * kfree_skb()-ed.
*/
skb_orphan(skb);
@@ -434,7 +439,9 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* pending key.
*/
if (flags & MCTP_HDR_FLAG_EOM) {
- sock_queue_rcv_skb(&msk->sk, skb);
+ rc = sock_queue_rcv_skb(&msk->sk, skb);
+ if (!rc)
+ skb = NULL;
if (key) {
/* we've hit a pending reassembly; not much we
* can do but drop it
@@ -443,7 +450,6 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
MCTP_TRACE_KEY_REPLIED);
key = NULL;
}
- rc = 0;
goto out_unlock;
}
@@ -470,8 +476,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* this function.
*/
rc = mctp_key_add(key, msk);
- if (!rc)
+ if (!rc) {
trace_mctp_key_acquire(key);
+ skb = NULL;
+ }
/* we don't need to release key->lock on exit, so
* clean up here and suppress the unlock via
@@ -489,6 +497,8 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
key = NULL;
} else {
rc = mctp_frag_queue(key, skb);
+ if (!rc)
+ skb = NULL;
}
}
@@ -503,12 +513,19 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
else
rc = mctp_frag_queue(key, skb);
+ if (rc)
+ goto out_unlock;
+
+ /* we've queued; the queue owns the skb now */
+ skb = NULL;
+
/* end of message? deliver to socket, and we're done with
* the reassembly/response key
*/
- if (!rc && flags & MCTP_HDR_FLAG_EOM) {
- sock_queue_rcv_skb(key->sk, key->reasm_head);
- key->reasm_head = NULL;
+ if (flags & MCTP_HDR_FLAG_EOM) {
+ rc = sock_queue_rcv_skb(key->sk, key->reasm_head);
+ if (!rc)
+ key->reasm_head = NULL;
__mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
key = NULL;
}
@@ -527,8 +544,7 @@ out_unlock:
if (any_key)
mctp_key_unref(any_key);
out:
- if (rc)
- kfree_skb(skb);
+ kfree_skb(skb);
return rc;
}
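
Aside (userspace sketch, not part of the patch): the MCTP rework follows one ownership rule: whenever the skb is successfully queued, the local pointer is cleared, so the single `kfree_skb()` at `out:` only frees buffers that were never handed over (`kfree_skb(NULL)` is a no-op). The same shape with `malloc`/`free`:

#include <stdio.h>
#include <stdlib.h>

struct buf { int data; };

/* A queue that, on success, takes ownership of the buffer. */
static int try_queue(struct buf *b, int should_fail)
{
	if (should_fail)
		return -1;
	free(b);	/* stand-in for "the queue will free it later" */
	return 0;
}

static int route_input(int should_fail)
{
	struct buf *b = malloc(sizeof(*b));
	int rc;

	if (!b)
		return -1;

	rc = try_queue(b, should_fail);
	if (!rc)
		b = NULL;	/* ownership transferred: don't free it below */

	free(b);		/* free(NULL) is a no-op, like kfree_skb(NULL) */
	return rc;
}

int main(void)
{
	printf("queued: %d, failed: %d\n", route_input(0), route_input(1));
	return 0;
}
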
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 8551dab1d1e6..17165b86ce22 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -837,6 +837,90 @@ static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
mctp_test_route_input_multiple_nets_key_fini(test, &t2);
}
+/* Input route to socket, using a single-packet message, where sock delivery
+ * fails. Ensure we're handling the failure appropriately.
+ */
+static void mctp_test_route_input_sk_fail_single(struct kunit *test)
+{
+ const struct mctp_hdr hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO);
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct socket *sock;
+ struct sk_buff *skb;
+ int rc;
+
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+
+ /* No rcvbuf space, so delivery should fail. __sock_set_rcvbuf will
+ * clamp the minimum to SOCK_MIN_RCVBUF, so we open-code this.
+ */
+ lock_sock(sock->sk);
+ WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
+ release_sock(sock->sk);
+
+ skb = mctp_test_create_skb(&hdr, 10);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
+ skb_get(skb);
+
+ mctp_test_skb_set_dev(skb, dev);
+
+ /* do route input, which should fail */
+ rc = mctp_route_input(&rt->rt, skb);
+ KUNIT_EXPECT_NE(test, rc, 0);
+
+ /* we should hold the only reference to skb */
+ KUNIT_EXPECT_EQ(test, refcount_read(&skb->users), 1);
+ kfree_skb(skb);
+
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
+/* Input route to socket, using a fragmented message, where sock delivery fails.
+ */
+static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
+{
+ const struct mctp_hdr hdrs[2] = { RX_FRAG(FL_S, 0), RX_FRAG(FL_E, 1) };
+ struct mctp_test_route *rt;
+ struct mctp_test_dev *dev;
+ struct sk_buff *skbs[2];
+ struct socket *sock;
+ unsigned int i;
+ int rc;
+
+ __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);
+
+ lock_sock(sock->sk);
+ WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
+ release_sock(sock->sk);
+
+ for (i = 0; i < ARRAY_SIZE(skbs); i++) {
+ skbs[i] = mctp_test_create_skb(&hdrs[i], 10);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skbs[i]);
+ skb_get(skbs[i]);
+
+ mctp_test_skb_set_dev(skbs[i], dev);
+ }
+
+ /* first route input should succeed; we're only queueing to the
+ * frag list
+ */
+ rc = mctp_route_input(&rt->rt, skbs[0]);
+ KUNIT_EXPECT_EQ(test, rc, 0);
+
+ /* final route input should fail to deliver to the socket */
+ rc = mctp_route_input(&rt->rt, skbs[1]);
+ KUNIT_EXPECT_NE(test, rc, 0);
+
+ /* we should hold the only reference to both skbs */
+ KUNIT_EXPECT_EQ(test, refcount_read(&skbs[0]->users), 1);
+ kfree_skb(skbs[0]);
+
+ KUNIT_EXPECT_EQ(test, refcount_read(&skbs[1]->users), 1);
+ kfree_skb(skbs[1]);
+
+ __mctp_route_test_fini(test, dev, rt, sock);
+}
+
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
static void mctp_test_flow_init(struct kunit *test,
@@ -1053,6 +1137,8 @@ static struct kunit_case mctp_test_cases[] = {
mctp_route_input_sk_reasm_gen_params),
KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys,
mctp_route_input_sk_keys_gen_params),
+ KUNIT_CASE(mctp_test_route_input_sk_fail_single),
+ KUNIT_CASE(mctp_test_route_input_sk_fail_frag),
KUNIT_CASE(mctp_test_route_input_multiple_nets_bind),
KUNIT_CASE(mctp_test_route_input_multiple_nets_key),
KUNIT_CASE(mctp_test_packet_flow),
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 1603b3702e22..a62bc874bf1e 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -667,8 +667,15 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
&echo, &drop_other_suboptions))
return false;
+ /*
+ * Later on, mptcp_write_options() will enforce mutual exclusion with
+ * DSS: bail out if that suboption is set and we can't drop it.
+ */
if (drop_other_suboptions)
remaining += opt_size;
+ else if (opts->suboptions & OPTION_MPTCP_DSS)
+ return false;
+
len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
if (remaining < len)
return false;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 08a72242428c..1b2e7cbb577f 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -136,6 +136,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
int delta;
if (MPTCP_SKB_CB(from)->offset ||
+ ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
!skb_try_coalesce(to, from, &fragstolen, &delta))
return false;
@@ -528,13 +529,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
}
-static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
+static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
{
bool slow;
slow = lock_sock_fast(ssk);
if (tcp_can_send_ack(ssk))
- tcp_cleanup_rbuf(ssk, 1);
+ tcp_cleanup_rbuf(ssk, copied);
unlock_sock_fast(ssk, slow);
}
@@ -551,7 +552,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
(ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
}
-static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
+static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
{
int old_space = READ_ONCE(msk->old_wspace);
struct mptcp_subflow_context *subflow;
@@ -559,14 +560,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
int space = __mptcp_space(sk);
bool cleanup, rx_empty;
- cleanup = (space > 0) && (space >= (old_space << 1));
- rx_empty = !__mptcp_rmem(sk);
+ cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
+ rx_empty = !__mptcp_rmem(sk) && copied;
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
- mptcp_subflow_cleanup_rbuf(ssk);
+ mptcp_subflow_cleanup_rbuf(ssk, copied);
}
}
@@ -1939,6 +1940,8 @@ do_error:
goto out;
}
+static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
+
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
struct msghdr *msg,
size_t len, int flags,
@@ -1992,6 +1995,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
break;
}
+ mptcp_rcv_space_adjust(msk, copied);
return copied;
}
@@ -2217,9 +2221,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
copied += bytes_read;
- /* be sure to advertise window change */
- mptcp_cleanup_rbuf(msk);
-
if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
continue;
@@ -2268,7 +2269,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
pr_debug("block timeout %ld\n", timeo);
- mptcp_rcv_space_adjust(msk, copied);
+ mptcp_cleanup_rbuf(msk, copied);
err = sk_wait_data(sk, &timeo, NULL);
if (err < 0) {
err = copied ? : err;
@@ -2276,7 +2277,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- mptcp_rcv_space_adjust(msk, copied);
+ mptcp_cleanup_rbuf(msk, copied);
out_err:
if (cmsg_flags && copied >= 0) {
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index bfae7066936b..db794fe1300e 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -611,6 +611,8 @@ init_list_set(struct net *net, struct ip_set *set, u32 size)
return true;
}
+static struct lock_class_key list_set_lockdep_key;
+
static int
list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
u32 flags)
@@ -627,6 +629,7 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
if (size < IP_SET_LIST_MIN_SIZE)
size = IP_SET_LIST_MIN_SIZE;
+ lockdep_set_class(&set->lock, &list_set_lockdep_key);
set->variant = &set_variant;
set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
__alignof__(struct set_elem));
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 98d7dbe3d787..c0289f83f96d 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1495,8 +1495,8 @@ int __init ip_vs_conn_init(void)
max_avail -= 2; /* ~4 in hash row */
max_avail -= 1; /* IPVS up to 1/2 of mem */
max_avail -= order_base_2(sizeof(struct ip_vs_conn));
- max = clamp(max, min, max_avail);
- ip_vs_conn_tab_bits = clamp_val(ip_vs_conn_tab_bits, min, max);
+ max = clamp(max_avail, min, max);
+ ip_vs_conn_tab_bits = clamp(ip_vs_conn_tab_bits, min, max);
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
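
Aside (illustrative, not part of the patch): the IPVS change keeps `clamp()`'s low bound at or below its high bound on low-memory systems by treating `max_avail` as the value being clamped; with the old argument order, `min > max_avail` was possible, which the kernel's `clamp()` can reject at build time. A small model of the new ordering:

#include <stdio.h>

#define clamp(val, lo, hi) ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

int main(void)
{
	int min = 8, max = 20, max_avail = 5;	/* little memory available */

	/* clamp(max_avail, min, max) always has lo <= hi; the old
	 * clamp(max, min, max_avail) could ask for lo > hi. */
	int bits_limit = clamp(max_avail, min, max);

	printf("table bits limited to %d\n", bits_limit);
	return 0;
}
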
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 2b5e246b8d9a..b94cb2ffbaf8 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -754,6 +754,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
int ret;
struct sk_buff *skbn;
+ /*
+ * Reject malformed packets early. Check that the packet contains at
+ * least two addresses plus one more byte for the Time-To-Live field.
+ */
+ if (skb->len < 2 * sizeof(ax25_address) + 1)
+ return 0;
nr_src = (ax25_address *)(skb->data + 0);
nr_dest = (ax25_address *)(skb->data + 7);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 886c0dd47b66..2d73769d67f4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -538,10 +538,8 @@ static void *packet_current_frame(struct packet_sock *po,
return packet_lookup_frame(po, rb, rb->head, status);
}
-static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
+static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
{
- u8 *skb_orig_data = skb->data;
- int skb_orig_len = skb->len;
struct vlan_hdr vhdr, *vh;
unsigned int header_len;
@@ -562,33 +560,21 @@ static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
else
return 0;
- skb_push(skb, skb->data - skb_mac_header(skb));
- vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr);
- if (skb_orig_data != skb->data) {
- skb->data = skb_orig_data;
- skb->len = skb_orig_len;
- }
+ vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
+ sizeof(vhdr), &vhdr);
if (unlikely(!vh))
return 0;
return ntohs(vh->h_vlan_TCI);
}
-static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
+static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
{
__be16 proto = skb->protocol;
- if (unlikely(eth_type_vlan(proto))) {
- u8 *skb_orig_data = skb->data;
- int skb_orig_len = skb->len;
-
- skb_push(skb, skb->data - skb_mac_header(skb));
- proto = __vlan_get_protocol(skb, proto, NULL);
- if (skb_orig_data != skb->data) {
- skb->data = skb_orig_data;
- skb->len = skb_orig_len;
- }
- }
+ if (unlikely(eth_type_vlan(proto)))
+ proto = __vlan_get_protocol_offset(skb, proto,
+ skb_mac_offset(skb), NULL);
return proto;
}
diff --git a/net/psample/psample.c b/net/psample/psample.c
index a0ddae8a65f9..25f92ba0840c 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -393,7 +393,9 @@ void psample_sample_packet(struct psample_group *group,
nla_total_size_64bit(sizeof(u64)) + /* timestamp */
nla_total_size(sizeof(u16)) + /* protocol */
(md->user_cookie_len ?
- nla_total_size(md->user_cookie_len) : 0); /* user cookie */
+ nla_total_size(md->user_cookie_len) : 0) + /* user cookie */
+ (md->rate_as_probability ?
+ nla_total_size(0) : 0); /* rate as probability */
#ifdef CONFIG_INET
tun_info = skb_tunnel_info(skb);
@@ -498,8 +500,9 @@ void psample_sample_packet(struct psample_group *group,
md->user_cookie))
goto error;
- if (md->rate_as_probability)
- nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY);
+ if (md->rate_as_probability &&
+ nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY))
+ goto error;
genlmsg_end(nl_skb, data);
genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index c45c192b7878..0b0794f164cf 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -137,7 +137,8 @@ static struct sctp_association *sctp_association_init(
= 5 * asoc->rto_max;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
+ (unsigned long)sp->autoclose * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 9e6c69d18581..6cc7b846cff1 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -2032,6 +2032,8 @@ static int smc_listen_prfx_check(struct smc_sock *new_smc,
if (pclc->hdr.typev1 == SMC_TYPE_N)
return 0;
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ if (!pclc_prfx)
+ return -EPROTO;
if (smc_clc_prfx_match(newclcsock, pclc_prfx))
return SMC_CLC_DECL_DIFFPREFIX;
@@ -2145,6 +2147,8 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
pclc_smcd = smc_get_clc_msg_smcd(pclc);
smc_v2_ext = smc_get_clc_v2_ext(pclc);
smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
+ if (!pclc_smcd || !smc_v2_ext || !smcd_v2_ext)
+ goto not_found;
mutex_lock(&smcd_dev_list.mutex);
if (pclc_smcd->ism.chid) {
@@ -2221,7 +2225,9 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
int rc = 0;
/* check if ISM V1 is available */
- if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
+ if (!(ini->smcd_version & SMC_V1) ||
+ !smcd_indicated(ini->smc_type_v1) ||
+ !pclc_smcd)
goto not_found;
ini->is_smcd = true; /* prepare ISM check */
ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid);
@@ -2272,7 +2278,8 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
goto not_found;
smc_v2_ext = smc_get_clc_v2_ext(pclc);
- if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
+ if (!smc_v2_ext ||
+ !smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
goto not_found;
/* prepare RDMA check */
@@ -2881,6 +2888,13 @@ __poll_t smc_poll(struct file *file, struct socket *sock,
} else {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+
+ if (sk->sk_state != SMC_INIT) {
+ /* Race breaker the same way as tcp_poll(). */
+ smp_mb__after_atomic();
+ if (atomic_read(&smc->conn.sndbuf_space))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ }
}
if (atomic_read(&smc->conn.bytes_to_rcv))
mask |= EPOLLIN | EPOLLRDNORM;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 33fa787c28eb..521f5df80e10 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -352,8 +352,11 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
struct smc_clc_msg_hdr *hdr = &pclc->hdr;
struct smc_clc_v2_extension *v2_ext;
- v2_ext = smc_get_clc_v2_ext(pclc);
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ if (!pclc_prfx ||
+ pclc_prfx->ipv6_prefixes_cnt > SMC_CLC_MAX_V6_PREFIX)
+ return false;
+
if (hdr->version == SMC_V1) {
if (hdr->typev1 == SMC_TYPE_N)
return false;
@@ -365,6 +368,13 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
sizeof(struct smc_clc_msg_trail))
return false;
} else {
+ v2_ext = smc_get_clc_v2_ext(pclc);
+ if ((hdr->typev2 != SMC_TYPE_N &&
+ (!v2_ext || v2_ext->hdr.eid_cnt > SMC_CLC_MAX_UEID)) ||
+ (smcd_indicated(hdr->typev2) &&
+ v2_ext->hdr.ism_gid_cnt > SMCD_CLC_MAX_V2_GID_ENTRIES))
+ return false;
+
if (ntohs(hdr->length) !=
sizeof(*pclc) +
sizeof(struct smc_clc_msg_smcd) +
@@ -764,6 +774,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
SMC_CLC_RECV_BUF_LEN : datlen;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen);
len = sock_recvmsg(smc->clcsock, &msg, krflags);
+ if (len < recvlen) {
+ smc->sk.sk_err = EPROTO;
+ reason_code = -EPROTO;
+ goto out;
+ }
datlen -= len;
}
if (clcm->type == SMC_CLC_DECLINE) {
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 5fd6f5b8ef03..767289925410 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -336,8 +336,12 @@ struct smc_clc_msg_decline_v2 { /* clc decline message */
static inline struct smc_clc_msg_proposal_prefix *
smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
{
+ u16 offset = ntohs(pclc->iparea_offset);
+
+ if (offset > sizeof(struct smc_clc_msg_smcd))
+ return NULL;
return (struct smc_clc_msg_proposal_prefix *)
- ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
+ ((u8 *)pclc + sizeof(*pclc) + offset);
}
static inline bool smcr_indicated(int smc_type)
@@ -376,8 +380,14 @@ static inline struct smc_clc_v2_extension *
smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
{
struct smc_clc_msg_smcd *prop_smcd = smc_get_clc_msg_smcd(prop);
+ u16 max_offset;
- if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset))
+ max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
+ offsetof(struct smc_clc_msg_proposal_area, pclc_smcd) -
+ offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
+
+ if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset) ||
+ ntohs(prop_smcd->v2_ext_offset) > max_offset)
return NULL;
return (struct smc_clc_v2_extension *)
@@ -390,9 +400,15 @@ smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
static inline struct smc_clc_smcd_v2_extension *
smc_get_clc_smcd_v2_ext(struct smc_clc_v2_extension *prop_v2ext)
{
+ u16 max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_smcd_v2_ext) -
+ offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) -
+ offsetof(struct smc_clc_v2_extension, hdr) -
+ offsetofend(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset);
+
if (!prop_v2ext)
return NULL;
- if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset))
+ if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) ||
+ ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) > max_offset)
return NULL;
return (struct smc_clc_smcd_v2_extension *)
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 500952c2e67b..3b125d348b4a 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1818,7 +1818,9 @@ void smcr_link_down_cond_sched(struct smc_link *lnk)
{
if (smc_link_downing(&lnk->state)) {
trace_smcr_link_down(lnk, __builtin_return_address(0));
- schedule_work(&lnk->link_down_wrk);
+ smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */
+ if (!schedule_work(&lnk->link_down_wrk))
+ smcr_link_put(lnk);
}
}
@@ -1850,11 +1852,14 @@ static void smc_link_down_work(struct work_struct *work)
struct smc_link_group *lgr = link->lgr;
if (list_empty(&lgr->list))
- return;
+ goto out;
wake_up_all(&lgr->llc_msg_waiter);
down_write(&lgr->llc_conf_mutex);
smcr_link_down(link);
up_write(&lgr->llc_conf_mutex);
+
+out:
+ smcr_link_put(link); /* smcr_link_hold by schedulers of link_down_work */
}
static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
diff --git a/rust/kernel/net/phy.rs b/rust/kernel/net/phy.rs
index b89c681d97c0..2fbfb6a94c11 100644
--- a/rust/kernel/net/phy.rs
+++ b/rust/kernel/net/phy.rs
@@ -860,7 +860,7 @@ impl DeviceMask {
/// ];
/// #[cfg(MODULE)]
/// #[no_mangle]
-/// static __mod_mdio__phydev_device_table: [::kernel::bindings::mdio_device_id; 2] = _DEVICE_TABLE;
+/// static __mod_device_table__mdio__phydev: [::kernel::bindings::mdio_device_id; 2] = _DEVICE_TABLE;
/// ```
#[macro_export]
macro_rules! module_phy_driver {
@@ -883,7 +883,7 @@ macro_rules! module_phy_driver {
#[cfg(MODULE)]
#[no_mangle]
- static __mod_mdio__phydev_device_table: [$crate::bindings::mdio_device_id;
+ static __mod_device_table__mdio__phydev: [$crate::bindings::mdio_device_id;
$crate::module_phy_driver!(@count_devices $($dev),+) + 1] = _DEVICE_TABLE;
};
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
index 4d1d2062f6eb..fd3e97192ed8 100644
--- a/rust/kernel/workqueue.rs
+++ b/rust/kernel/workqueue.rs
@@ -519,7 +519,15 @@ impl_has_work! {
impl{T} HasWork<Self> for ClosureWork<T> { self.work }
}
-// SAFETY: TODO.
+// SAFETY: The `__enqueue` implementation in RawWorkItem uses a `work_struct` initialized with the
+// `run` method of this trait as the function pointer because:
+// - `__enqueue` gets the `work_struct` from the `Work` field, using `T::raw_get_work`.
+// - The only safe way to create a `Work` object is through `Work::new`.
+// - `Work::new` makes sure that `T::Pointer::run` is passed to `init_work_with_key`.
+// - Finally `Work` and `RawWorkItem` guarantee that the correct `Work` field
+// will be used because of the ID const generic bound. This makes sure that `T::raw_get_work`
+// uses the correct offset for the `Work` field, and `Work::new` picks the correct
+// implementation of `WorkItemPointer` for `Arc<T>`.
unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
where
T: WorkItem<ID, Pointer = Self>,
@@ -537,7 +545,13 @@ where
}
}
-// SAFETY: TODO.
+// SAFETY: The `work_struct` raw pointer is guaranteed to be valid for the duration of the call to
+// the closure because we get it from an `Arc`, which means that the ref count will be at least 1,
+// and we don't drop the `Arc` ourselves. If `queue_work_on` returns true, it is further guaranteed
+// to be valid until a call to the function pointer in `work_struct` because we leak the memory it
+// points to, and only reclaim it if the closure returns false, or in `WorkItemPointer::run`, which
+// is what the function pointer in the `work_struct` must be pointing to, according to the safety
+// requirements of `WorkItemPointer`.
unsafe impl<T, const ID: u64> RawWorkItem<ID> for Arc<T>
where
T: WorkItem<ID, Pointer = Self>,
diff --git a/scripts/mksysmap b/scripts/mksysmap
index c12723a04655..3accbdb269ac 100755
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -26,7 +26,7 @@
# (do not forget a space before each pattern)
# local symbols for ARM, MIPS, etc.
-/ \\$/d
+/ \$/d
# local labels, .LBB, .Ltmpxxx, .L__unnamed_xx, .LASANPC, etc.
/ \.L/d
@@ -39,7 +39,7 @@
/ __pi_\.L/d
# arm64 local symbols in non-VHE KVM namespace
-/ __kvm_nvhe_\\$/d
+/ __kvm_nvhe_\$/d
/ __kvm_nvhe_\.L/d
# lld arm/aarch64/mips thunks
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 5b5745f00eb3..19ec72a69e90 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -132,7 +132,8 @@ struct devtable {
* based at address m.
*/
#define DEF_FIELD(m, devid, f) \
- typeof(((struct devid *)0)->f) f = TO_NATIVE(*(typeof(f) *)((m) + OFF_##devid##_##f))
+ typeof(((struct devid *)0)->f) f = \
+ get_unaligned_native((typeof(f) *)((m) + OFF_##devid##_##f))
/* Define a variable f that holds the address of field f of struct devid
* based at address m. Due to the way typeof works, for a field of type
@@ -600,7 +601,7 @@ static void do_pnp_card_entry(struct module *mod, void *symval)
static void do_pcmcia_entry(struct module *mod, void *symval)
{
char alias[256] = {};
- unsigned int i;
+
DEF_FIELD(symval, pcmcia_device_id, match_flags);
DEF_FIELD(symval, pcmcia_device_id, manf_id);
DEF_FIELD(symval, pcmcia_device_id, card_id);
@@ -609,10 +610,6 @@ static void do_pcmcia_entry(struct module *mod, void *symval)
DEF_FIELD(symval, pcmcia_device_id, device_no);
DEF_FIELD_ADDR(symval, pcmcia_device_id, prod_id_hash);
- for (i=0; i<4; i++) {
- (*prod_id_hash)[i] = TO_NATIVE((*prod_id_hash)[i]);
- }
-
ADD(alias, "m", match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID,
manf_id);
ADD(alias, "c", match_flags & PCMCIA_DEV_ID_MATCH_CARD_ID,
@@ -623,10 +620,14 @@ static void do_pcmcia_entry(struct module *mod, void *symval)
function);
ADD(alias, "pfn", match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO,
device_no);
- ADD(alias, "pa", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1, (*prod_id_hash)[0]);
- ADD(alias, "pb", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2, (*prod_id_hash)[1]);
- ADD(alias, "pc", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID3, (*prod_id_hash)[2]);
- ADD(alias, "pd", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4, (*prod_id_hash)[3]);
+ ADD(alias, "pa", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1,
+ get_unaligned_native(*prod_id_hash + 0));
+ ADD(alias, "pb", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2,
+ get_unaligned_native(*prod_id_hash + 1));
+ ADD(alias, "pc", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID3,
+ get_unaligned_native(*prod_id_hash + 2));
+ ADD(alias, "pd", match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4,
+ get_unaligned_native(*prod_id_hash + 3));
module_alias_printf(mod, true, "pcmcia:%s", alias);
}
@@ -654,10 +655,9 @@ static void do_input(char *alias,
{
unsigned int i;
- for (i = min / BITS_PER_LONG; i < max / BITS_PER_LONG + 1; i++)
- arr[i] = TO_NATIVE(arr[i]);
- for (i = min; i < max; i++)
- if (arr[i / BITS_PER_LONG] & (1ULL << (i%BITS_PER_LONG)))
+ for (i = min; i <= max; i++)
+ if (get_unaligned_native(arr + i / BITS_PER_LONG) &
+ (1ULL << (i % BITS_PER_LONG)))
sprintf(alias + strlen(alias), "%X,*", i);
}
@@ -812,15 +812,13 @@ static void do_virtio_entry(struct module *mod, void *symval)
* Each byte of the guid will be represented by two hex characters
* in the name.
*/
-
static void do_vmbus_entry(struct module *mod, void *symval)
{
- int i;
DEF_FIELD_ADDR(symval, hv_vmbus_device_id, guid);
- char guid_name[(sizeof(*guid) + 1) * 2];
+ char guid_name[sizeof(*guid) * 2 + 1];
- for (i = 0; i < (sizeof(*guid) * 2); i += 2)
- sprintf(&guid_name[i], "%02x", TO_NATIVE((guid->b)[i/2]));
+ for (int i = 0; i < sizeof(*guid); i++)
+ sprintf(&guid_name[i * 2], "%02x", guid->b[i]);
module_alias_printf(mod, false, "vmbus:%s", guid_name);
}
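The rewritten do_vmbus_entry() sizes guid_name as two hex characters per byte plus a terminating NUL and indexes by i * 2, replacing the over-allocated buffer and half-index arithmetic of the old loop. A standalone sketch of the same encoding with a made-up four-byte id:

#include <stdio.h>

int main(void)
{
        unsigned char guid[4] = { 0xde, 0xad, 0xbe, 0xef };
        char name[sizeof(guid) * 2 + 1];         /* two hex chars per byte + NUL */

        for (unsigned int i = 0; i < sizeof(guid); i++)
                sprintf(&name[i * 2], "%02x", guid[i]);

        printf("vmbus:%s\n", name);              /* prints vmbus:deadbeef */
        return 0;
}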
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index fb787a5715f5..7ea59dc4926b 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -155,12 +155,13 @@ char *get_line(char **stringp)
/* A list of all modules we processed */
LIST_HEAD(modules);
-static struct module *find_module(const char *modname)
+static struct module *find_module(const char *filename, const char *modname)
{
struct module *mod;
list_for_each_entry(mod, &modules, list) {
- if (strcmp(mod->name, modname) == 0)
+ if (!strcmp(mod->dump_file, filename) &&
+ !strcmp(mod->name, modname))
return mod;
}
return NULL;
@@ -1137,9 +1138,9 @@ static Elf_Addr addend_386_rel(uint32_t *location, unsigned int r_type)
{
switch (r_type) {
case R_386_32:
- return TO_NATIVE(*location);
+ return get_unaligned_native(location);
case R_386_PC32:
- return TO_NATIVE(*location) + 4;
+ return get_unaligned_native(location) + 4;
}
return (Elf_Addr)(-1);
@@ -1160,24 +1161,24 @@ static Elf_Addr addend_arm_rel(void *loc, Elf_Sym *sym, unsigned int r_type)
switch (r_type) {
case R_ARM_ABS32:
case R_ARM_REL32:
- inst = TO_NATIVE(*(uint32_t *)loc);
+ inst = get_unaligned_native((uint32_t *)loc);
return inst + sym->st_value;
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVT_ABS:
- inst = TO_NATIVE(*(uint32_t *)loc);
+ inst = get_unaligned_native((uint32_t *)loc);
offset = sign_extend32(((inst & 0xf0000) >> 4) | (inst & 0xfff),
15);
return offset + sym->st_value;
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
- inst = TO_NATIVE(*(uint32_t *)loc);
+ inst = get_unaligned_native((uint32_t *)loc);
offset = sign_extend32((inst & 0x00ffffff) << 2, 25);
return offset + sym->st_value + 8;
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
- upper = TO_NATIVE(*(uint16_t *)loc);
- lower = TO_NATIVE(*((uint16_t *)loc + 1));
+ upper = get_unaligned_native((uint16_t *)loc);
+ lower = get_unaligned_native((uint16_t *)loc + 1);
offset = sign_extend32(((upper & 0x000f) << 12) |
((upper & 0x0400) << 1) |
((lower & 0x7000) >> 4) |
@@ -1194,8 +1195,8 @@ static Elf_Addr addend_arm_rel(void *loc, Elf_Sym *sym, unsigned int r_type)
* imm11 = lower[10:0]
* imm32 = SignExtend(S:J2:J1:imm6:imm11:'0')
*/
- upper = TO_NATIVE(*(uint16_t *)loc);
- lower = TO_NATIVE(*((uint16_t *)loc + 1));
+ upper = get_unaligned_native((uint16_t *)loc);
+ lower = get_unaligned_native((uint16_t *)loc + 1);
sign = (upper >> 10) & 1;
j1 = (lower >> 13) & 1;
@@ -1218,8 +1219,8 @@ static Elf_Addr addend_arm_rel(void *loc, Elf_Sym *sym, unsigned int r_type)
* I2 = NOT(J2 XOR S)
* imm32 = SignExtend(S:I1:I2:imm10:imm11:'0')
*/
- upper = TO_NATIVE(*(uint16_t *)loc);
- lower = TO_NATIVE(*((uint16_t *)loc + 1));
+ upper = get_unaligned_native((uint16_t *)loc);
+ lower = get_unaligned_native((uint16_t *)loc + 1);
sign = (upper >> 10) & 1;
j1 = (lower >> 13) & 1;
@@ -1240,7 +1241,7 @@ static Elf_Addr addend_mips_rel(uint32_t *location, unsigned int r_type)
{
uint32_t inst;
- inst = TO_NATIVE(*location);
+ inst = get_unaligned_native(location);
switch (r_type) {
case R_MIPS_LO16:
return inst & 0xffff;
@@ -2030,10 +2031,10 @@ static void read_dump(const char *fname)
continue;
}
- mod = find_module(modname);
+ mod = find_module(fname, modname);
if (!mod) {
mod = new_module(modname, strlen(modname));
- mod->from_dump = true;
+ mod->dump_file = fname;
}
s = sym_add_exported(symname, mod, gpl_only, namespace);
sym_set_crc(s, crc);
@@ -2052,7 +2053,7 @@ static void write_dump(const char *fname)
struct symbol *sym;
list_for_each_entry(mod, &modules, list) {
- if (mod->from_dump)
+ if (mod->dump_file)
continue;
list_for_each_entry(sym, &mod->exported_symbols, list) {
if (trim_unused_exports && !sym->used)
@@ -2076,7 +2077,7 @@ static void write_namespace_deps_files(const char *fname)
list_for_each_entry(mod, &modules, list) {
- if (mod->from_dump || list_empty(&mod->missing_namespaces))
+ if (mod->dump_file || list_empty(&mod->missing_namespaces))
continue;
buf_printf(&ns_deps_buf, "%s.ko:", mod->name);
@@ -2194,7 +2195,7 @@ int main(int argc, char **argv)
read_symbols_from_files(files_source);
list_for_each_entry(mod, &modules, list) {
- if (mod->from_dump || mod->is_vmlinux)
+ if (mod->dump_file || mod->is_vmlinux)
continue;
check_modname_len(mod);
@@ -2205,7 +2206,7 @@ int main(int argc, char **argv)
handle_white_list_exports(unused_exports_white_list);
list_for_each_entry(mod, &modules, list) {
- if (mod->from_dump)
+ if (mod->dump_file)
continue;
if (mod->is_vmlinux)
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 49848fcbe2a1..ffd0a52a606e 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -65,6 +65,20 @@
#define TO_NATIVE(x) \
(target_is_big_endian == host_is_big_endian ? x : bswap(x))
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __attribute__((__packed__)) *__pptr = \
+ (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+
+#define get_unaligned_native(ptr) \
+({ \
+ typeof(*(ptr)) _val = get_unaligned(ptr); \
+ TO_NATIVE(_val); \
+})
+
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#define strstarts(str, prefix) (strncmp(str, prefix, strlen(prefix)) == 0)
@@ -95,14 +109,15 @@ struct module_alias {
/**
* struct module - represent a module (vmlinux or *.ko)
*
+ * @dump_file: path to the .symvers file if loaded from a file
* @aliases: list head for module_aliases
*/
struct module {
struct list_head list;
struct list_head exported_symbols;
struct list_head unresolved_symbols;
+ const char *dump_file;
bool is_gpl_compatible;
- bool from_dump; /* true if module was loaded from *.symvers */
bool is_vmlinux;
bool seen;
bool has_init;
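The get_unaligned_native() helper added to modpost.h above reads a possibly unaligned field through a packed single-member struct and only then applies the existing TO_NATIVE() byte swap, replacing the direct TO_NATIVE(*ptr) dereferences that assumed natural alignment. A standalone sketch of the packed-struct read is below; it needs the GCC/Clang typeof and statement-expression extensions that modpost itself relies on, and it leaves the endianness handling out since that part is unchanged.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define __get_unaligned_t(type, ptr) ({                                 \
        const struct { type x; } __attribute__((__packed__)) *__pptr =  \
                (typeof(__pptr))(ptr);                                  \
        __pptr->x;                                                      \
})
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))

int main(void)
{
        unsigned char buf[6] = { 0 };
        uint32_t v = 0x11223344;

        memcpy(buf + 1, &v, sizeof(v));          /* deliberately misaligned */
        printf("0x%x\n", get_unaligned((uint32_t *)(buf + 1)));
        return 0;
}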
diff --git a/scripts/package/PKGBUILD b/scripts/package/PKGBUILD
index f83493838cf9..dca706617adc 100644
--- a/scripts/package/PKGBUILD
+++ b/scripts/package/PKGBUILD
@@ -103,7 +103,7 @@ _package-headers() {
_package-api-headers() {
pkgdesc="Kernel headers sanitized for use in userspace"
- provides=(linux-api-headers)
+ provides=(linux-api-headers="${pkgver}")
conflicts=(linux-api-headers)
_prologue
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index fb686fd3266f..ad7aba0f268e 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -63,6 +63,12 @@ install_linux_image () {
esac
cp "$(${MAKE} -s -f ${srctree}/Makefile image_name)" "${pdir}/${installed_image_path}"
+ if [ "${ARCH}" != um ]; then
+ install_maint_scripts "${pdir}"
+ fi
+}
+
+install_maint_scripts () {
# Install the maintainer scripts
# Note: hook scripts under /etc/kernel are also executed by official Debian
# kernel packages, as well as kernel packages built using make-kpkg.
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index 4ffcc70f8e31..b038a1380b8a 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -70,6 +70,13 @@ set_debarch() {
debarch=sh4$(if_enabled_echo CONFIG_CPU_BIG_ENDIAN eb)
fi
;;
+ um)
+ if is_enabled CONFIG_64BIT; then
+ debarch=amd64
+ else
+ debarch=i386
+ fi
+ ;;
esac
if [ -z "$debarch" ]; then
debarch=$(dpkg-architecture -qDEB_HOST_ARCH)
diff --git a/scripts/sorttable.h b/scripts/sorttable.h
index 7bd0184380d3..a7c5445baf00 100644
--- a/scripts/sorttable.h
+++ b/scripts/sorttable.h
@@ -110,7 +110,7 @@ static inline unsigned long orc_ip(const int *ip)
static int orc_sort_cmp(const void *_a, const void *_b)
{
- struct orc_entry *orc_a;
+ struct orc_entry *orc_a, *orc_b;
const int *a = g_orc_ip_table + *(int *)_a;
const int *b = g_orc_ip_table + *(int *)_b;
unsigned long a_val = orc_ip(a);
@@ -128,6 +128,9 @@ static int orc_sort_cmp(const void *_a, const void *_b)
* whitelisted .o files which didn't get objtool generation.
*/
orc_a = g_orc_table + (a - g_orc_ip_table);
+ orc_b = g_orc_table + (b - g_orc_ip_table);
+ if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
+ return 0;
return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}
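Before this change, two ORC_TYPE_UNDEFINED entries compared as "a before b" regardless of argument order, which is an inconsistent comparator; checking both sides and returning 0 restores the symmetry qsort() requires. A standalone sketch of the corrected rule, with toy entries in place of the ORC tables:

#include <stdio.h>
#include <stdlib.h>

struct entry { int type; long ip; };
#define TYPE_UNDEFINED 0

static int cmp(const void *pa, const void *pb)
{
        const struct entry *a = pa, *b = pb;

        if (a->ip != b->ip)
                return a->ip < b->ip ? -1 : 1;
        /* Equal IPs: two undefined entries must compare equal, not "less". */
        if (a->type == TYPE_UNDEFINED && b->type == TYPE_UNDEFINED)
                return 0;
        return a->type == TYPE_UNDEFINED ? -1 : 1;
}

int main(void)
{
        struct entry e[3] = { { 1, 20 }, { 0, 10 }, { 0, 10 } };

        qsort(e, 3, sizeof(e[0]), cmp);
        for (int i = 0; i < 3; i++)
                printf("%ld/%d\n", e[i].ip, e[i].type);
        return 0;
}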
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 971c45d576ba..3d5c563cfc4c 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -979,7 +979,10 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
return;
break;
default:
- BUG();
+ pr_warn_once(
+ "SELinux: unknown extended permission (%u) will be ignored\n",
+ node->datum.u.xperms->specified);
+ return;
}
if (node->key.specified == AVTAB_XPERMS_ALLOWED) {
@@ -998,7 +1001,8 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
&node->datum.u.xperms->perms,
xpermd->dontaudit);
} else {
- BUG();
+ pr_warn_once("SELinux: unknown specified key (%u)\n",
+ node->key.specified);
}
}
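Replacing BUG() with pr_warn_once() turns an unexpected policy value into a skipped entry plus a single diagnostic instead of a kernel crash. A standalone sketch of that warn-once-and-ignore shape, with a plain static flag standing in for the kernel's once machinery:

#include <stdio.h>

static void warn_once(const char *msg)
{
        static int warned;

        if (!warned) {
                warned = 1;
                fprintf(stderr, "%s", msg);
        }
}

static void handle(unsigned int specified)
{
        switch (specified) {
        case 1:
                /* known kinds handled here */
                break;
        default:
                warn_once("unknown kind, ignoring\n");
                return;                  /* skip the entry rather than abort */
        }
}

int main(void)
{
        handle(7);
        handle(7);                       /* second bad value: no further noise */
        return 0;
}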
diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
index c7590d4989bb..803521178279 100644
--- a/sound/soc/amd/Kconfig
+++ b/sound/soc/amd/Kconfig
@@ -105,7 +105,7 @@ config SND_SOC_AMD_ACP6x
config SND_SOC_AMD_YC_MACH
tristate "AMD YC support for DMIC"
select SND_SOC_DMIC
- depends on SND_SOC_AMD_ACP6x
+ depends on SND_SOC_AMD_ACP6x && ACPI
help
This option enables machine driver for Yellow Carp platform
using dmic. ACP IP has PDM Decoder block with DMA controller.
diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c
index 4575326d0635..8b556950b855 100644
--- a/sound/soc/amd/ps/pci-ps.c
+++ b/sound/soc/amd/ps/pci-ps.c
@@ -83,6 +83,7 @@ static int acp63_init(void __iomem *acp_base, struct device *dev)
return ret;
}
acp63_enable_interrupts(acp_base);
+ writel(0, acp_base + ACP_ZSC_DSP_CTRL);
return 0;
}
@@ -97,6 +98,7 @@ static int acp63_deinit(void __iomem *acp_base, struct device *dev)
return ret;
}
writel(0, acp_base + ACP_CONTROL);
+ writel(1, acp_base + ACP_ZSC_DSP_CTRL);
return 0;
}
@@ -312,6 +314,7 @@ static struct snd_soc_acpi_mach *acp63_sdw_machine_select(struct device *dev)
if (mach && mach->link_mask) {
mach->mach_params.links = mach->links;
mach->mach_params.link_mask = mach->link_mask;
+ mach->mach_params.subsystem_rev = acp_data->acp_rev;
return mach;
}
}
@@ -669,8 +672,10 @@ static int __maybe_unused snd_acp63_suspend(struct device *dev)
adata = dev_get_drvdata(dev);
if (adata->is_sdw_dev) {
adata->sdw_en_stat = check_acp_sdw_enable_status(adata);
- if (adata->sdw_en_stat)
+ if (adata->sdw_en_stat) {
+ writel(1, adata->acp63_base + ACP_ZSC_DSP_CTRL);
return 0;
+ }
}
ret = acp63_deinit(adata->acp63_base, dev);
if (ret)
@@ -685,9 +690,10 @@ static int __maybe_unused snd_acp63_runtime_resume(struct device *dev)
int ret;
adata = dev_get_drvdata(dev);
- if (adata->sdw_en_stat)
+ if (adata->sdw_en_stat) {
+ writel(0, adata->acp63_base + ACP_ZSC_DSP_CTRL);
return 0;
-
+ }
ret = acp63_init(adata->acp63_base, dev);
if (ret) {
dev_err(dev, "ACP init failed\n");
@@ -705,8 +711,10 @@ static int __maybe_unused snd_acp63_resume(struct device *dev)
int ret;
adata = dev_get_drvdata(dev);
- if (adata->sdw_en_stat)
+ if (adata->sdw_en_stat) {
+ writel(0, adata->acp63_base + ACP_ZSC_DSP_CTRL);
return 0;
+ }
ret = acp63_init(adata->acp63_base, dev);
if (ret)
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 0b9e87dc2b6c..ee35f3aa5521 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -692,7 +692,7 @@ config SND_SOC_AW88261
the input amplitude.
config SND_SOC_AW88081
- tristate "Soc Audio for awinic aw88081"
+ tristate "Soc Audio for awinic aw88081/aw88083"
depends on I2C
select REGMAP_I2C
select SND_SOC_AW88395_LIB
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index f37e82ddb7a1..d7ad795603c1 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -80,7 +80,7 @@ snd-soc-cs35l56-shared-y := cs35l56-shared.o
snd-soc-cs35l56-i2c-y := cs35l56-i2c.o
snd-soc-cs35l56-spi-y := cs35l56-spi.o
snd-soc-cs35l56-sdw-y := cs35l56-sdw.o
-snd-soc-cs40l50-objs := cs40l50-codec.o
+snd-soc-cs40l50-y := cs40l50-codec.o
snd-soc-cs42l42-y := cs42l42.o
snd-soc-cs42l42-i2c-y := cs42l42-i2c.o
snd-soc-cs42l42-sdw-y := cs42l42-sdw.o
@@ -92,7 +92,7 @@ snd-soc-cs42l52-y := cs42l52.o
snd-soc-cs42l56-y := cs42l56.o
snd-soc-cs42l73-y := cs42l73.o
snd-soc-cs42l83-i2c-y := cs42l83-i2c.o
-snd-soc-cs42l84-objs := cs42l84.o
+snd-soc-cs42l84-y := cs42l84.o
snd-soc-cs4234-y := cs4234.o
snd-soc-cs4265-y := cs4265.o
snd-soc-cs4270-y := cs4270.o
@@ -334,8 +334,8 @@ snd-soc-wcd-classh-y := wcd-clsh-v2.o
snd-soc-wcd-mbhc-y := wcd-mbhc-v2.o
snd-soc-wcd9335-y := wcd9335.o
snd-soc-wcd934x-y := wcd934x.o
-snd-soc-wcd937x-objs := wcd937x.o
-snd-soc-wcd937x-sdw-objs := wcd937x-sdw.o
+snd-soc-wcd937x-y := wcd937x.o
+snd-soc-wcd937x-sdw-y := wcd937x-sdw.o
snd-soc-wcd938x-y := wcd938x.o
snd-soc-wcd938x-sdw-y := wcd938x-sdw.o
snd-soc-wcd939x-y := wcd939x.o
diff --git a/sound/soc/codecs/ad193x-i2c.c b/sound/soc/codecs/ad193x-i2c.c
index 15d74bb31c4c..6aa168e01fbb 100644
--- a/sound/soc/codecs/ad193x-i2c.c
+++ b/sound/soc/codecs/ad193x-i2c.c
@@ -23,7 +23,6 @@ MODULE_DEVICE_TABLE(i2c, ad193x_id);
static int ad193x_i2c_probe(struct i2c_client *client)
{
struct regmap_config config;
- const struct i2c_device_id *id = i2c_match_id(ad193x_id, client);
config = ad193x_regmap_config;
config.val_bits = 8;
@@ -31,7 +30,7 @@ static int ad193x_i2c_probe(struct i2c_client *client)
return ad193x_probe(&client->dev,
devm_regmap_init_i2c(client, &config),
- (enum ad193x_type)id->driver_data);
+ (uintptr_t)i2c_get_match_data(client));
}
static struct i2c_driver ad193x_i2c_driver = {
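This and the following codec conversions switch from i2c_match_id() to i2c_get_match_data(), which returns the driver_data from whichever table matched (I2C id, OF, or ACPI) and removes the need for forward-declared id tables; the probe then recovers the integer value with a uintptr_t round trip. A standalone sketch of that lookup-and-cast shape, with toy stand-ins for the I2C core helpers and made-up driver_data values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins for struct i2c_device_id and i2c_get_match_data(). */
struct id { const char *name; unsigned long driver_data; };

static const struct id ids[] = {
        { "ad1933", 1 },
        { "ad1934", 2 },
        { NULL, 0 },
};

static const void *get_match_data(const char *name)
{
        for (const struct id *i = ids; i->name; i++)
                if (!strcmp(i->name, name))
                        return (const void *)(uintptr_t)i->driver_data;
        return NULL;
}

int main(void)
{
        /* Probe side: recover the enum-like value via a uintptr_t cast. */
        unsigned int type = (uintptr_t)get_match_data("ad1934");

        printf("matched type %u\n", type);
        return 0;
}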
diff --git a/sound/soc/codecs/adau1761-i2c.c b/sound/soc/codecs/adau1761-i2c.c
index a554255186ae..eba7e4f42c78 100644
--- a/sound/soc/codecs/adau1761-i2c.c
+++ b/sound/soc/codecs/adau1761-i2c.c
@@ -14,12 +14,9 @@
#include "adau1761.h"
-static const struct i2c_device_id adau1761_i2c_ids[];
-
static int adau1761_i2c_probe(struct i2c_client *client)
{
struct regmap_config config;
- const struct i2c_device_id *id = i2c_match_id(adau1761_i2c_ids, client);
config = adau1761_regmap_config;
config.val_bits = 8;
@@ -27,7 +24,7 @@ static int adau1761_i2c_probe(struct i2c_client *client)
return adau1761_probe(&client->dev,
devm_regmap_init_i2c(client, &config),
- id->driver_data, NULL);
+ (uintptr_t)i2c_get_match_data(client), NULL);
}
static void adau1761_i2c_remove(struct i2c_client *client)
diff --git a/sound/soc/codecs/adau1781-i2c.c b/sound/soc/codecs/adau1781-i2c.c
index 3a170fd78ff3..cb67fde8d9a8 100644
--- a/sound/soc/codecs/adau1781-i2c.c
+++ b/sound/soc/codecs/adau1781-i2c.c
@@ -14,12 +14,9 @@
#include "adau1781.h"
-static const struct i2c_device_id adau1781_i2c_ids[];
-
static int adau1781_i2c_probe(struct i2c_client *client)
{
struct regmap_config config;
- const struct i2c_device_id *id = i2c_match_id(adau1781_i2c_ids, client);
config = adau1781_regmap_config;
config.val_bits = 8;
@@ -27,7 +24,7 @@ static int adau1781_i2c_probe(struct i2c_client *client)
return adau1781_probe(&client->dev,
devm_regmap_init_i2c(client, &config),
- id->driver_data, NULL);
+ (uintptr_t)i2c_get_match_data(client), NULL);
}
static void adau1781_i2c_remove(struct i2c_client *client)
diff --git a/sound/soc/codecs/adau1977-i2c.c b/sound/soc/codecs/adau1977-i2c.c
index 24c7b9c84c19..441c8079246a 100644
--- a/sound/soc/codecs/adau1977-i2c.c
+++ b/sound/soc/codecs/adau1977-i2c.c
@@ -14,12 +14,9 @@
#include "adau1977.h"
-static const struct i2c_device_id adau1977_i2c_ids[];
-
static int adau1977_i2c_probe(struct i2c_client *client)
{
struct regmap_config config;
- const struct i2c_device_id *id = i2c_match_id(adau1977_i2c_ids, client);
config = adau1977_regmap_config;
config.val_bits = 8;
@@ -27,7 +24,7 @@ static int adau1977_i2c_probe(struct i2c_client *client)
return adau1977_probe(&client->dev,
devm_regmap_init_i2c(client, &config),
- id->driver_data, NULL);
+ (uintptr_t)i2c_get_match_data(client), NULL);
}
static const struct i2c_device_id adau1977_i2c_ids[] = {
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index b24c32206884..fbf723758079 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -987,9 +987,9 @@ static int alc5623_i2c_probe(struct i2c_client *client)
struct alc5623_priv *alc5623;
struct device_node *np;
unsigned int vid1, vid2;
+ unsigned int matched_id;
int ret;
u32 val32;
- const struct i2c_device_id *id;
alc5623 = devm_kzalloc(&client->dev, sizeof(struct alc5623_priv),
GFP_KERNEL);
@@ -1016,12 +1016,12 @@ static int alc5623_i2c_probe(struct i2c_client *client)
}
vid2 >>= 8;
- id = i2c_match_id(alc5623_i2c_table, client);
+ matched_id = (uintptr_t)i2c_get_match_data(client);
- if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) {
+ if ((vid1 != 0x10ec) || (vid2 != matched_id)) {
dev_err(&client->dev, "unknown or wrong codec\n");
- dev_err(&client->dev, "Expected %x:%lx, got %x:%x\n",
- 0x10ec, id->driver_data,
+ dev_err(&client->dev, "Expected %x:%x, got %x:%x\n",
+ 0x10ec, matched_id,
vid1, vid2);
return -ENODEV;
}
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index d5021f266930..72f4622204ff 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -1108,7 +1108,7 @@ static int alc5632_i2c_probe(struct i2c_client *client)
struct alc5632_priv *alc5632;
int ret, ret1, ret2;
unsigned int vid1, vid2;
- const struct i2c_device_id *id;
+ unsigned int matched_id;
alc5632 = devm_kzalloc(&client->dev,
sizeof(struct alc5632_priv), GFP_KERNEL);
@@ -1134,9 +1134,9 @@ static int alc5632_i2c_probe(struct i2c_client *client)
vid2 >>= 8;
- id = i2c_match_id(alc5632_i2c_table, client);
+ matched_id = (uintptr_t)i2c_get_match_data(client);
- if ((vid1 != 0x10EC) || (vid2 != id->driver_data)) {
+ if ((vid1 != 0x10EC) || (vid2 != matched_id)) {
dev_err(&client->dev,
"Device is not a ALC5632: VID1=0x%x, VID2=0x%x\n", vid1, vid2);
return -EINVAL;
diff --git a/sound/soc/codecs/aw88081.c b/sound/soc/codecs/aw88081.c
index 58b8e002d76f..ad16ab6812cd 100644
--- a/sound/soc/codecs/aw88081.c
+++ b/sound/soc/codecs/aw88081.c
@@ -14,13 +14,18 @@
#include "aw88081.h"
#include "aw88395/aw88395_device.h"
+enum aw8808x_type {
+ AW88081,
+ AW88083,
+};
+
struct aw88081 {
struct aw_device *aw_pa;
struct mutex lock;
struct delayed_work start_work;
struct regmap *regmap;
struct aw_container *aw_cfg;
-
+ enum aw8808x_type devtype;
bool phase_sync;
};
@@ -32,6 +37,14 @@ static const struct regmap_config aw88081_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_BIG,
};
+static const struct regmap_config aw88083_regmap_config = {
+ .val_bits = 16,
+ .reg_bits = 8,
+ .max_register = AW88083_REG_MAX,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
+
static int aw88081_dev_get_iis_status(struct aw_device *aw_dev)
{
unsigned int reg_val;
@@ -196,6 +209,41 @@ static void aw88081_dev_amppd(struct aw_device *aw_dev, bool amppd)
~AW88081_EN_PA_MASK, AW88081_EN_PA_WORKING_VALUE);
}
+static void aw88083_i2c_wen(struct aw88081 *aw88081, bool flag)
+{
+ struct aw_device *aw_dev = aw88081->aw_pa;
+
+ if (aw88081->devtype != AW88083)
+ return;
+
+ if (flag)
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_I2C_WEN_MASK, AW88083_I2C_WEN_ENABLE_VALUE);
+ else
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_I2C_WEN_MASK, AW88083_I2C_WEN_DISABLE_VALUE);
+}
+
+static void aw88083_dev_amppd(struct aw_device *aw_dev, bool amppd)
+{
+ if (amppd)
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_AMPPD_MASK, AW88083_AMPPD_POWER_DOWN_VALUE);
+ else
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_AMPPD_MASK, AW88083_AMPPD_WORKING_VALUE);
+}
+
+static void aw88083_dev_pllpd(struct aw_device *aw_dev, bool pllpd)
+{
+ if (pllpd)
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_PLL_PD_MASK, AW88083_PLL_PD_WORKING_VALUE);
+ else
+ regmap_update_bits(aw_dev->regmap, AW88081_SYSCTRL_REG,
+ ~AW88083_PLL_PD_MASK, AW88083_PLL_PD_POWER_DOWN_VALUE);
+}
+
static void aw88081_dev_clear_int_status(struct aw_device *aw_dev)
{
unsigned int int_status;
@@ -284,12 +332,90 @@ static void aw88081_dev_uls_hmute(struct aw_device *aw_dev, bool uls_hmute)
AW88081_ULS_HMUTE_DISABLE_VALUE);
}
+static int aw88081_dev_reg_value_check(struct aw_device *aw_dev,
+ unsigned char reg_addr, unsigned short *reg_val)
+{
+ unsigned int read_vol;
+
+ if (reg_addr == AW88081_SYSCTRL_REG) {
+ *reg_val &= ~(~AW88081_EN_PA_MASK |
+ ~AW88081_PWDN_MASK |
+ ~AW88081_HMUTE_MASK |
+ ~AW88081_ULS_HMUTE_MASK);
+
+ *reg_val |= AW88081_EN_PA_POWER_DOWN_VALUE |
+ AW88081_PWDN_POWER_DOWN_VALUE |
+ AW88081_HMUTE_ENABLE_VALUE |
+ AW88081_ULS_HMUTE_ENABLE_VALUE;
+ }
+
+ if (reg_addr == AW88081_SYSCTRL2_REG) {
+ read_vol = (*reg_val & (~AW88081_VOL_MASK)) >> AW88081_VOL_START_BIT;
+ aw_dev->volume_desc.init_volume = read_vol;
+ }
+
+ /* i2stxen */
+ if (reg_addr == AW88081_I2SCTRL3_REG) {
+ /* close tx */
+ *reg_val &= AW88081_I2STXEN_MASK;
+ *reg_val |= AW88081_I2STXEN_DISABLE_VALUE;
+ }
+
+ return 0;
+}
+
+static int aw88083_dev_reg_value_check(struct aw_device *aw_dev,
+ unsigned char reg_addr, unsigned short *reg_val)
+{
+ unsigned int read_vol;
+
+ if (reg_addr == AW88081_SYSCTRL_REG) {
+ *reg_val &= ~(~AW88083_AMPPD_MASK |
+ ~AW88081_PWDN_MASK |
+ ~AW88081_HMUTE_MASK |
+ ~AW88083_I2C_WEN_MASK);
+
+ *reg_val |= AW88083_AMPPD_POWER_DOWN_VALUE |
+ AW88081_PWDN_POWER_DOWN_VALUE |
+ AW88081_HMUTE_ENABLE_VALUE |
+ AW88083_I2C_WEN_ENABLE_VALUE;
+ }
+
+ if (reg_addr == AW88081_SYSCTRL2_REG) {
+ read_vol = (*reg_val & (~AW88081_VOL_MASK)) >> AW88081_VOL_START_BIT;
+ aw_dev->volume_desc.init_volume = read_vol;
+ }
+
+ return 0;
+}
+
+static int aw88081_reg_value_check(struct aw88081 *aw88081,
+ unsigned char reg_addr, unsigned short *reg_val)
+{
+ struct aw_device *aw_dev = aw88081->aw_pa;
+ int ret;
+
+ switch (aw88081->devtype) {
+ case AW88081:
+ ret = aw88081_dev_reg_value_check(aw_dev, reg_addr, reg_val);
+ break;
+ case AW88083:
+ ret = aw88083_dev_reg_value_check(aw_dev, reg_addr, reg_val);
+ break;
+ default:
+ dev_err(aw_dev->dev, "unsupported device\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int aw88081_dev_reg_update(struct aw88081 *aw88081,
unsigned char *data, unsigned int len)
{
struct aw_device *aw_dev = aw88081->aw_pa;
struct aw_volume_desc *vol_desc = &aw_dev->volume_desc;
- unsigned int read_vol;
int data_len, i, ret;
int16_t *reg_data;
u16 reg_val;
@@ -312,30 +438,9 @@ static int aw88081_dev_reg_update(struct aw88081 *aw88081,
reg_addr = reg_data[i];
reg_val = reg_data[i + 1];
- if (reg_addr == AW88081_SYSCTRL_REG) {
- reg_val &= ~(~AW88081_EN_PA_MASK |
- ~AW88081_PWDN_MASK |
- ~AW88081_HMUTE_MASK |
- ~AW88081_ULS_HMUTE_MASK);
-
- reg_val |= AW88081_EN_PA_POWER_DOWN_VALUE |
- AW88081_PWDN_POWER_DOWN_VALUE |
- AW88081_HMUTE_ENABLE_VALUE |
- AW88081_ULS_HMUTE_ENABLE_VALUE;
- }
-
- if (reg_addr == AW88081_SYSCTRL2_REG) {
- read_vol = (reg_val & (~AW88081_VOL_MASK)) >>
- AW88081_VOL_START_BIT;
- aw_dev->volume_desc.init_volume = read_vol;
- }
-
- /* i2stxen */
- if (reg_addr == AW88081_I2SCTRL3_REG) {
- /* close tx */
- reg_val &= AW88081_I2STXEN_MASK;
- reg_val |= AW88081_I2STXEN_DISABLE_VALUE;
- }
+ ret = aw88081_reg_value_check(aw88081, reg_addr, &reg_val);
+ if (ret)
+ return ret;
ret = regmap_write(aw_dev->regmap, reg_addr, reg_val);
if (ret)
@@ -474,8 +579,60 @@ pll_check_fail:
return ret;
}
-static int aw88081_dev_stop(struct aw_device *aw_dev)
+static int aw88083_dev_start(struct aw88081 *aw88081)
+{
+ struct aw_device *aw_dev = aw88081->aw_pa;
+
+ if (aw_dev->status == AW88081_DEV_PW_ON) {
+ dev_dbg(aw_dev->dev, "already power on");
+ return 0;
+ }
+
+ aw88083_i2c_wen(aw88081, true);
+
+ /* power on */
+ aw88081_dev_pwd(aw_dev, false);
+ usleep_range(AW88081_2000_US, AW88081_2000_US + 10);
+
+ aw88083_dev_pllpd(aw_dev, true);
+ /* amppd on */
+ aw88083_dev_amppd(aw_dev, false);
+ usleep_range(AW88081_2000_US, AW88081_2000_US + 50);
+
+ /* close mute */
+ aw88081_dev_mute(aw_dev, false);
+
+ aw88083_i2c_wen(aw88081, false);
+
+ aw_dev->status = AW88081_DEV_PW_ON;
+
+ return 0;
+}
+
+static int aw88081_device_start(struct aw88081 *aw88081)
+{
+ int ret;
+
+ switch (aw88081->devtype) {
+ case AW88081:
+ ret = aw88081_dev_start(aw88081);
+ break;
+ case AW88083:
+ ret = aw88083_dev_start(aw88081);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(aw88081->aw_pa->dev, "unsupported device\n");
+ break;
+ }
+
+ return ret;
+}
+
+static int aw88081_dev_stop(struct aw88081 *aw88081)
{
+ struct aw_device *aw_dev = aw88081->aw_pa;
+
if (aw_dev->status == AW88081_DEV_PW_OFF) {
dev_dbg(aw_dev->dev, "already power off");
return 0;
@@ -503,6 +660,56 @@ static int aw88081_dev_stop(struct aw_device *aw_dev)
return 0;
}
+static int aw88083_dev_stop(struct aw88081 *aw88081)
+{
+ struct aw_device *aw_dev = aw88081->aw_pa;
+
+ if (aw_dev->status == AW88081_DEV_PW_OFF) {
+ dev_dbg(aw_dev->dev, "already power off");
+ return 0;
+ }
+
+ aw_dev->status = AW88081_DEV_PW_OFF;
+
+ aw88083_i2c_wen(aw88081, true);
+ /* set mute */
+ aw88081_dev_mute(aw_dev, true);
+
+ usleep_range(AW88081_2000_US, AW88081_2000_US + 100);
+
+ /* enable amppd */
+ aw88083_dev_amppd(aw_dev, true);
+
+ aw88083_dev_pllpd(aw_dev, false);
+
+ /* set power down */
+ aw88081_dev_pwd(aw_dev, true);
+
+ aw88083_i2c_wen(aw88081, false);
+
+ return 0;
+}
+
+static int aw88081_stop(struct aw88081 *aw88081)
+{
+ int ret;
+
+ switch (aw88081->devtype) {
+ case AW88081:
+ ret = aw88081_dev_stop(aw88081);
+ break;
+ case AW88083:
+ ret = aw88083_dev_stop(aw88081);
+ break;
+ default:
+ dev_err(aw88081->aw_pa->dev, "unsupported device\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int aw88081_reg_update(struct aw88081 *aw88081, bool force)
{
struct aw_device *aw_dev = aw88081->aw_pa;
@@ -540,7 +747,7 @@ static void aw88081_start_pa(struct aw88081 *aw88081)
dev_err(aw88081->aw_pa->dev, "fw update failed, cnt:%d\n", i);
continue;
}
- ret = aw88081_dev_start(aw88081);
+ ret = aw88081_device_start(aw88081);
if (ret) {
dev_err(aw88081->aw_pa->dev, "aw88081 device start failed. retry = %d", i);
continue;
@@ -745,7 +952,7 @@ static int aw88081_profile_set(struct snd_kcontrol *kcontrol,
}
if (aw88081->aw_pa->status) {
- aw88081_dev_stop(aw88081->aw_pa);
+ aw88081_stop(aw88081);
aw88081_start(aw88081, AW88081_SYNC_START);
}
@@ -781,12 +988,16 @@ static int aw88081_volume_set(struct snd_kcontrol *kcontrol,
if (value < mc->min || value > mc->max)
return -EINVAL;
+ aw88083_i2c_wen(aw88081, true);
+
if (vol_desc->ctl_volume != value) {
vol_desc->ctl_volume = value;
aw88081_dev_set_volume(aw88081->aw_pa, vol_desc->ctl_volume);
return 1;
}
+ aw88083_i2c_wen(aw88081, false);
+
return 0;
}
@@ -860,13 +1071,19 @@ static int aw88081_init(struct aw88081 *aw88081, struct i2c_client *i2c, struct
dev_err(&i2c->dev, "%s read chipid error. ret = %d", __func__, ret);
return ret;
}
- if (chip_id != AW88081_CHIP_ID) {
+
+ switch (chip_id) {
+ case AW88081_CHIP_ID:
+ dev_dbg(&i2c->dev, "chip id = 0x%x\n", chip_id);
+ break;
+ case AW88083_CHIP_ID:
+ dev_dbg(&i2c->dev, "chip id = 0x%x\n", chip_id);
+ break;
+ default:
dev_err(&i2c->dev, "unsupported device");
return -ENXIO;
}
- dev_dbg(&i2c->dev, "chip id = %x\n", chip_id);
-
aw_dev = devm_kzalloc(&i2c->dev, sizeof(*aw_dev), GFP_KERNEL);
if (!aw_dev)
return -ENOMEM;
@@ -875,7 +1092,7 @@ static int aw88081_init(struct aw88081 *aw88081, struct i2c_client *i2c, struct
aw_dev->i2c = i2c;
aw_dev->regmap = regmap;
aw_dev->dev = &i2c->dev;
- aw_dev->chip_id = AW88081_CHIP_ID;
+ aw_dev->chip_id = chip_id;
aw_dev->acf = NULL;
aw_dev->prof_info.prof_desc = NULL;
aw_dev->prof_info.prof_type = AW88395_DEV_NONE_TYPE_ID;
@@ -912,21 +1129,8 @@ static int aw88081_dev_init(struct aw88081 *aw88081, struct aw_container *aw_cfg
return ret;
}
- aw88081_dev_clear_int_status(aw_dev);
-
- aw88081_dev_uls_hmute(aw_dev, true);
-
- aw88081_dev_mute(aw_dev, true);
-
- usleep_range(AW88081_5000_US, AW88081_5000_US + 10);
-
- aw88081_dev_i2s_tx_enable(aw_dev, false);
-
- usleep_range(AW88081_1000_US, AW88081_1000_US + 100);
-
- aw88081_dev_amppd(aw_dev, true);
-
- aw88081_dev_pwd(aw_dev, true);
+ aw_dev->status = AW88081_DEV_PW_ON;
+ aw88081_stop(aw88081);
return 0;
}
@@ -977,7 +1181,7 @@ static int aw88081_playback_event(struct snd_soc_dapm_widget *w,
aw88081_start(aw88081, AW88081_ASYNC_START);
break;
case SND_SOC_DAPM_POST_PMD:
- aw88081_dev_stop(aw88081->aw_pa);
+ aw88081_stop(aw88081);
break;
default:
break;
@@ -1036,8 +1240,17 @@ static const struct snd_soc_component_driver soc_codec_dev_aw88081 = {
.num_controls = ARRAY_SIZE(aw88081_controls),
};
+static const struct i2c_device_id aw88081_i2c_id[] = {
+ { AW88081_I2C_NAME, AW88081},
+ { AW88083_I2C_NAME, AW88083},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, aw88081_i2c_id);
+
static int aw88081_i2c_probe(struct i2c_client *i2c)
{
+ const struct regmap_config *regmap_config;
+ const struct i2c_device_id *id;
struct aw88081 *aw88081;
int ret;
@@ -1049,11 +1262,25 @@ static int aw88081_i2c_probe(struct i2c_client *i2c)
if (!aw88081)
return -ENOMEM;
+ id = i2c_match_id(aw88081_i2c_id, i2c);
+ aw88081->devtype = id->driver_data;
+
mutex_init(&aw88081->lock);
i2c_set_clientdata(i2c, aw88081);
- aw88081->regmap = devm_regmap_init_i2c(i2c, &aw88081_regmap_config);
+ switch (aw88081->devtype) {
+ case AW88081:
+ regmap_config = &aw88081_regmap_config;
+ break;
+ case AW88083:
+ regmap_config = &aw88083_regmap_config;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ aw88081->regmap = devm_regmap_init_i2c(i2c, regmap_config);
if (IS_ERR(aw88081->regmap))
return dev_err_probe(&i2c->dev, PTR_ERR(aw88081->regmap),
"failed to init regmap\n");
@@ -1068,12 +1295,6 @@ static int aw88081_i2c_probe(struct i2c_client *i2c)
aw88081_dai, ARRAY_SIZE(aw88081_dai));
}
-static const struct i2c_device_id aw88081_i2c_id[] = {
- { AW88081_I2C_NAME },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, aw88081_i2c_id);
-
static struct i2c_driver aw88081_i2c_driver = {
.driver = {
.name = AW88081_I2C_NAME,
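The reworked probe records the matched variant from the id table and selects the matching regmap_config before registering the regmap, instead of hard-coding the AW88081 layout. A standalone sketch of per-variant configuration selection keyed on driver_data; the 0x6f value is illustrative, while 0x7D mirrors the AW88083_REG_MAX added in the header below.

#include <stdio.h>

enum devtype { AW88081_T, AW88083_T };

struct cfg { unsigned int max_register; };

static const struct cfg cfg_88081 = { .max_register = 0x6f };   /* illustrative */
static const struct cfg cfg_88083 = { .max_register = 0x7d };

static const struct cfg *pick_cfg(enum devtype t)
{
        switch (t) {
        case AW88081_T: return &cfg_88081;
        case AW88083_T: return &cfg_88083;
        default:        return NULL;     /* unknown id: fail the probe */
        }
}

int main(void)
{
        const struct cfg *c = pick_cfg(AW88083_T);

        printf("max_register 0x%x\n", c ? c->max_register : 0);
        return 0;
}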
diff --git a/sound/soc/codecs/aw88081.h b/sound/soc/codecs/aw88081.h
index b4bf7288021a..7a4564270ab3 100644
--- a/sound/soc/codecs/aw88081.h
+++ b/sound/soc/codecs/aw88081.h
@@ -231,6 +231,49 @@
#define AW88081_CCO_MUX_BYPASS_VALUE \
(AW88081_CCO_MUX_BYPASS << AW88081_CCO_MUX_START_BIT)
+#define AW88083_I2C_WEN_START_BIT (14)
+#define AW88083_I2C_WEN_BITS_LEN (2)
+#define AW88083_I2C_WEN_MASK \
+ (~(((1<<AW88083_I2C_WEN_BITS_LEN)-1) << AW88083_I2C_WEN_START_BIT))
+
+#define AW88083_I2C_WEN_DISABLE (0)
+#define AW88083_I2C_WEN_DISABLE_VALUE \
+ (AW88083_I2C_WEN_DISABLE << AW88083_I2C_WEN_START_BIT)
+
+#define AW88083_I2C_WEN_ENABLE (2)
+#define AW88083_I2C_WEN_ENABLE_VALUE \
+ (AW88083_I2C_WEN_ENABLE << AW88083_I2C_WEN_START_BIT)
+
+#define AW88083_PLL_PD_START_BIT (2)
+#define AW88083_PLL_PD_BITS_LEN (1)
+#define AW88083_PLL_PD_MASK \
+ (~(((1<<AW88083_PLL_PD_BITS_LEN)-1) << AW88083_PLL_PD_START_BIT))
+
+#define AW88083_PLL_PD_POWER_DOWN (1)
+#define AW88083_PLL_PD_POWER_DOWN_VALUE \
+ (AW88083_PLL_PD_POWER_DOWN << AW88083_PLL_PD_START_BIT)
+
+#define AW88083_PLL_PD_WORKING (0)
+#define AW88083_PLL_PD_WORKING_VALUE \
+ (AW88083_PLL_PD_WORKING << AW88083_PLL_PD_START_BIT)
+
+#define AW88083_AMPPD_START_BIT (1)
+#define AW88083_AMPPD_BITS_LEN (1)
+#define AW88083_AMPPD_MASK \
+ (~(((1<<AW88083_AMPPD_BITS_LEN)-1) << AW88083_AMPPD_START_BIT))
+
+#define AW88083_AMPPD_WORKING (0)
+#define AW88083_AMPPD_WORKING_VALUE \
+ (AW88083_AMPPD_WORKING << AW88083_AMPPD_START_BIT)
+
+#define AW88083_AMPPD_POWER_DOWN (1)
+#define AW88083_AMPPD_POWER_DOWN_VALUE \
+ (AW88083_AMPPD_POWER_DOWN << AW88083_AMPPD_START_BIT)
+
+#define AW88083_REG_MAX (0x7D)
+#define AW88083_I2C_NAME "aw88083"
+#define AW88083_CHIP_ID 0x2407
+
#define AW88081_START_RETRIES (5)
#define AW88081_START_WORK_DELAY_MS (0)
diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
index ae045c88c48d..735a1e487c6f 100644
--- a/sound/soc/codecs/cs35l56.c
+++ b/sound/soc/codecs/cs35l56.c
@@ -646,6 +646,12 @@ static struct snd_soc_dai_driver cs35l56_dai[] = {
.rates = CS35L56_RATES,
.formats = CS35L56_RX_FORMATS,
},
+ .symmetric_rate = 1,
+ .ops = &cs35l56_sdw_dai_ops,
+ },
+ {
+ .name = "cs35l56-sdw1c",
+ .id = 2,
.capture = {
.stream_name = "SDW1 Capture",
.channels_min = 1,
@@ -655,7 +661,7 @@ static struct snd_soc_dai_driver cs35l56_dai[] = {
},
.symmetric_rate = 1,
.ops = &cs35l56_sdw_dai_ops,
- }
+ },
};
static int cs35l56_write_cal(struct cs35l56_private *cs35l56)
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 83c21c17fb80..d2a2daefc2ec 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -12,7 +12,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/find.h>
+#include <linux/bitmap.h>
#include <linux/gcd.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c
index e7cc50096297..f171bd66fcac 100644
--- a/sound/soc/codecs/cs42l51-i2c.c
+++ b/sound/soc/codecs/cs42l51-i2c.c
@@ -13,9 +13,9 @@
#include "cs42l51.h"
-static struct i2c_device_id cs42l51_i2c_id[] = {
- {"cs42l51"},
- {}
+static const struct i2c_device_id cs42l51_i2c_id[] = {
+ { "cs42l51" },
+ { }
};
MODULE_DEVICE_TABLE(i2c, cs42l51_i2c_id);
diff --git a/sound/soc/codecs/cs42l84.c b/sound/soc/codecs/cs42l84.c
index 17d5c96e334d..88cf3c03986e 100644
--- a/sound/soc/codecs/cs42l84.c
+++ b/sound/soc/codecs/cs42l84.c
@@ -1087,7 +1087,7 @@ static const struct of_device_id cs42l84_of_match[] = {
MODULE_DEVICE_TABLE(of, cs42l84_of_match);
static const struct i2c_device_id cs42l84_id[] = {
- {"cs42l84", 0},
+ { "cs42l84" },
{}
};
MODULE_DEVICE_TABLE(i2c, cs42l84_id);
diff --git a/sound/soc/codecs/es8323.c b/sound/soc/codecs/es8323.c
index 6f4fa36ea34d..a9822998199f 100644
--- a/sound/soc/codecs/es8323.c
+++ b/sound/soc/codecs/es8323.c
@@ -758,7 +758,7 @@ static int es8323_i2c_probe(struct i2c_client *i2c_client)
}
static const struct i2c_device_id es8323_i2c_id[] = {
- { "es8323", 0 },
+ { "es8323" },
{ }
};
MODULE_DEVICE_TABLE(i2c, es8323_i2c_id);
diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c
index b24d6472ad5f..a840a2eb92b9 100644
--- a/sound/soc/codecs/madera.c
+++ b/sound/soc/codecs/madera.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
@@ -3965,7 +3966,7 @@ static int madera_enable_fll(struct madera_fll *fll)
}
madera_fll_dbg(fll, "Enabling FLL, initially %s\n",
- already_enabled ? "enabled" : "disabled");
+ str_enabled_disabled(already_enabled));
if (fll->fout < MADERA_FLL_MIN_FOUT ||
fll->fout > MADERA_FLL_MAX_FOUT) {
@@ -4252,7 +4253,7 @@ static int madera_enable_fll_ao(struct madera_fll *fll,
pm_runtime_get_sync(madera->dev);
madera_fll_dbg(fll, "Enabling FLL_AO, initially %s\n",
- already_enabled ? "enabled" : "disabled");
+ str_enabled_disabled(already_enabled));
/* FLL_AO_HOLD must be set before configuring any registers */
regmap_update_bits(fll->madera->regmap,
@@ -4576,7 +4577,7 @@ static int madera_fllhj_enable(struct madera_fll *fll)
pm_runtime_get_sync(madera->dev);
madera_fll_dbg(fll, "Enabling FLL, initially %s\n",
- already_enabled ? "enabled" : "disabled");
+ str_enabled_disabled(already_enabled));
/* FLLn_HOLD must be set before configuring any registers */
regmap_update_bits(fll->madera->regmap,
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 8915f5250695..37e61d8d4be6 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -1731,7 +1731,6 @@ MODULE_DEVICE_TABLE(i2c, max98088_i2c_id);
static int max98088_i2c_probe(struct i2c_client *i2c)
{
struct max98088_priv *max98088;
- const struct i2c_device_id *id;
max98088 = devm_kzalloc(&i2c->dev, sizeof(struct max98088_priv),
GFP_KERNEL);
@@ -1747,8 +1746,7 @@ static int max98088_i2c_probe(struct i2c_client *i2c)
if (PTR_ERR(max98088->mclk) == -EPROBE_DEFER)
return PTR_ERR(max98088->mclk);
- id = i2c_match_id(max98088_i2c_id, i2c);
- max98088->devtype = id->driver_data;
+ max98088->devtype = (uintptr_t)i2c_get_match_data(i2c);
i2c_set_clientdata(i2c, max98088);
max98088->pdata = i2c->dev.platform_data;
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 2adf744c6526..790e2ae6dc18 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2543,8 +2543,6 @@ MODULE_DEVICE_TABLE(i2c, max98090_i2c_id);
static int max98090_i2c_probe(struct i2c_client *i2c)
{
struct max98090_priv *max98090;
- const struct acpi_device_id *acpi_id;
- kernel_ulong_t driver_data = 0;
int ret;
pr_debug("max98090_i2c_probe\n");
@@ -2554,21 +2552,7 @@ static int max98090_i2c_probe(struct i2c_client *i2c)
if (max98090 == NULL)
return -ENOMEM;
- if (ACPI_HANDLE(&i2c->dev)) {
- acpi_id = acpi_match_device(i2c->dev.driver->acpi_match_table,
- &i2c->dev);
- if (!acpi_id) {
- dev_err(&i2c->dev, "No driver data\n");
- return -EINVAL;
- }
- driver_data = acpi_id->driver_data;
- } else {
- const struct i2c_device_id *i2c_id =
- i2c_match_id(max98090_i2c_id, i2c);
- driver_data = i2c_id->driver_data;
- }
-
- max98090->devtype = driver_data;
+ max98090->devtype = (uintptr_t)i2c_get_match_data(i2c);
i2c_set_clientdata(i2c, max98090);
max98090->pdata = i2c->dev.platform_data;
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 7e525d49328d..cfb63fe69267 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -2115,7 +2115,6 @@ static int max98095_i2c_probe(struct i2c_client *i2c)
{
struct max98095_priv *max98095;
int ret;
- const struct i2c_device_id *id;
max98095 = devm_kzalloc(&i2c->dev, sizeof(struct max98095_priv),
GFP_KERNEL);
@@ -2131,8 +2130,7 @@ static int max98095_i2c_probe(struct i2c_client *i2c)
return ret;
}
- id = i2c_match_id(max98095_i2c_id, i2c);
- max98095->devtype = id->driver_data;
+ max98095->devtype = (uintptr_t)i2c_get_match_data(i2c);
i2c_set_clientdata(i2c, max98095);
max98095->pdata = i2c->dev.platform_data;
diff --git a/sound/soc/codecs/ntp8835.c b/sound/soc/codecs/ntp8835.c
index 796e1410496f..2cc4c6395f55 100644
--- a/sound/soc/codecs/ntp8835.c
+++ b/sound/soc/codecs/ntp8835.c
@@ -454,7 +454,7 @@ static int ntp8835_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id ntp8835_i2c_id[] = {
- { "ntp8835", 0 },
+ { "ntp8835" },
{}
};
MODULE_DEVICE_TABLE(i2c, ntp8835_i2c_id);
diff --git a/sound/soc/codecs/ntp8918.c b/sound/soc/codecs/ntp8918.c
index 0493ab6acbe4..a332893fc51d 100644
--- a/sound/soc/codecs/ntp8918.c
+++ b/sound/soc/codecs/ntp8918.c
@@ -371,7 +371,7 @@ static int ntp8918_i2c_probe(struct i2c_client *i2c)
}
static const struct i2c_device_id ntp8918_i2c_id[] = {
- { "ntp8918", 0 },
+ { "ntp8918" },
{}
};
MODULE_DEVICE_TABLE(i2c, ntp8918_i2c_id);
diff --git a/sound/soc/codecs/pcm186x-i2c.c b/sound/soc/codecs/pcm186x-i2c.c
index a514ebd1b68a..a50f9f6e39c1 100644
--- a/sound/soc/codecs/pcm186x-i2c.c
+++ b/sound/soc/codecs/pcm186x-i2c.c
@@ -33,8 +33,7 @@ MODULE_DEVICE_TABLE(i2c, pcm186x_i2c_id);
static int pcm186x_i2c_probe(struct i2c_client *i2c)
{
- const struct i2c_device_id *id = i2c_match_id(pcm186x_i2c_id, i2c);
- const enum pcm186x_type type = (enum pcm186x_type)id->driver_data;
+ const enum pcm186x_type type = (uintptr_t)i2c_get_match_data(i2c);
int irq = i2c->irq;
struct regmap *regmap;
diff --git a/sound/soc/codecs/pcm6240.c b/sound/soc/codecs/pcm6240.c
index 5d99877f8839..4ff39e0b95b2 100644
--- a/sound/soc/codecs/pcm6240.c
+++ b/sound/soc/codecs/pcm6240.c
@@ -2059,7 +2059,6 @@ static char *str_to_upper(char *str)
static int pcmdevice_i2c_probe(struct i2c_client *i2c)
{
- const struct i2c_device_id *id = i2c_match_id(pcmdevice_i2c_id, i2c);
struct pcmdevice_priv *pcm_dev;
struct device_node *np;
unsigned int dev_addrs[PCMDEVICE_MAX_I2C_DEVICES];
@@ -2069,7 +2068,7 @@ static int pcmdevice_i2c_probe(struct i2c_client *i2c)
if (!pcm_dev)
return -ENOMEM;
- pcm_dev->chip_id = (id != NULL) ? id->driver_data : 0;
+ pcm_dev->chip_id = (uintptr_t)i2c_get_match_data(i2c);
pcm_dev->dev = &i2c->dev;
pcm_dev->client = i2c;
diff --git a/sound/soc/codecs/peb2466.c b/sound/soc/codecs/peb2466.c
index bb9ca6354ae1..a989cfe058f0 100644
--- a/sound/soc/codecs/peb2466.c
+++ b/sound/soc/codecs/peb2466.c
@@ -26,8 +26,7 @@ struct peb2466_lookup {
unsigned int count;
};
-#define PEB2466_TLV_SIZE (sizeof((unsigned int []){TLV_DB_SCALE_ITEM(0, 0, 0)}) / \
- sizeof(unsigned int))
+#define PEB2466_TLV_SIZE ARRAY_SIZE(((unsigned int[]){TLV_DB_SCALE_ITEM(0, 0, 0)}))
struct peb2466_lkup_ctrl {
int reg;
diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
index ff9e14fad0cd..a8820435d1e0 100644
--- a/sound/soc/codecs/rt5682-i2c.c
+++ b/sound/soc/codecs/rt5682-i2c.c
@@ -186,6 +186,12 @@ static int rt5682_i2c_probe(struct i2c_client *i2c)
return -ENODEV;
}
+ regmap_read(rt5682->regmap, RT5682_INT_DEVICE_ID, &val);
+ if (val == 0x6956) {
+ dev_dbg(&i2c->dev, "ALC5682I-VE device\n");
+ rt5682->ve_ic = true;
+ }
+
mutex_init(&rt5682->calibrate_mutex);
rt5682_calibrate(rt5682);
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index aa163ec40862..b4d72fc4a44d 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -395,6 +395,7 @@ bool rt5682_volatile_register(struct device *dev, unsigned int reg)
case RT5682_4BTN_IL_CMD_1:
case RT5682_AJD1_CTRL:
case RT5682_HP_CALIB_CTRL_1:
+ case RT5682_INT_DEVICE_ID:
case RT5682_DEVICE_ID:
case RT5682_I2C_MODE:
case RT5682_HP_CALIB_CTRL_10:
@@ -419,6 +420,7 @@ bool rt5682_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case RT5682_RESET:
+ case RT5682_INT_DEVICE_ID:
case RT5682_VERSION_ID:
case RT5682_VENDOR_ID:
case RT5682_DEVICE_ID:
@@ -3139,7 +3141,10 @@ void rt5682_calibrate(struct rt5682_priv *rt5682)
regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0100);
regmap_write(rt5682->regmap, RT5682_HP_IMP_SENS_CTRL_19, 0x3800);
regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x3000);
- regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x7005);
+ if (rt5682->ve_ic)
+ regmap_write(rt5682->regmap, RT5682_CHOP_ADC, 0x7005);
+ else
+ regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x7005);
regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0x686c);
regmap_write(rt5682->regmap, RT5682_CAL_REC, 0x0d0d);
regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_2, 0x0321);
@@ -3168,7 +3173,10 @@ void rt5682_calibrate(struct rt5682_priv *rt5682)
regmap_write(rt5682->regmap, RT5682_GLB_CLK, 0x0000);
regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000);
regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000);
- regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005);
+ if (rt5682->ve_ic)
+ regmap_write(rt5682->regmap, RT5682_CHOP_ADC, 0x2005);
+ else
+ regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005);
regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4);
regmap_write(rt5682->regmap, RT5682_CAL_REC, 0x0c0c);
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index b2d9e87af259..de43a5d99403 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -22,6 +22,7 @@
/* Info */
#define RT5682_RESET 0x0000
+#define RT5682_INT_DEVICE_ID 0x00f9
#define RT5682_VERSION_ID 0x00fd
#define RT5682_VENDOR_ID 0x00fe
#define RT5682_DEVICE_ID 0x00ff
@@ -1446,6 +1447,7 @@ struct rt5682_priv {
bool hw_init;
bool first_hw_init;
bool is_sdw;
+ bool ve_ic;
#ifdef CONFIG_COMMON_CLK
struct clk_hw dai_clks_hw[RT5682_DAI_NUM_CLKS];
diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
index ec255ada44e0..cd702574c84b 100644
--- a/sound/soc/codecs/rt715-sdw.c
+++ b/sound/soc/codecs/rt715-sdw.c
@@ -372,47 +372,6 @@ static const struct regmap_config rt715_sdw_regmap = {
.use_single_write = true,
};
-int hda_to_sdw(unsigned int nid, unsigned int verb, unsigned int payload,
- unsigned int *sdw_addr_h, unsigned int *sdw_data_h,
- unsigned int *sdw_addr_l, unsigned int *sdw_data_l)
-{
- unsigned int offset_h, offset_l, e_verb;
-
- if (((verb & 0xff) != 0) || verb == 0xf00) { /* 12 bits command */
- if (verb == 0x7ff) /* special case */
- offset_h = 0;
- else
- offset_h = 0x3000;
-
- if (verb & 0x800) /* get command */
- e_verb = (verb - 0xf00) | 0x80;
- else /* set command */
- e_verb = (verb - 0x700);
-
- *sdw_data_h = payload; /* 7 bits payload */
- *sdw_addr_l = *sdw_data_l = 0;
- } else { /* 4 bits command */
- if ((verb & 0x800) == 0x800) { /* read */
- offset_h = 0x9000;
- offset_l = 0xa000;
- } else { /* write */
- offset_h = 0x7000;
- offset_l = 0x8000;
- }
- e_verb = verb >> 8;
- *sdw_data_h = (payload >> 8); /* 16 bits payload [15:8] */
- *sdw_addr_l = (e_verb << 8) | nid | 0x80; /* 0x80: valid bit */
- *sdw_addr_l += offset_l;
- *sdw_data_l = payload & 0xff;
- }
-
- *sdw_addr_h = (e_verb << 8) | nid;
- *sdw_addr_h += offset_h;
-
- return 0;
-}
-EXPORT_SYMBOL(hda_to_sdw);
-
static int rt715_update_status(struct sdw_slave *slave,
enum sdw_slave_status status)
{
diff --git a/sound/soc/codecs/rt715.h b/sound/soc/codecs/rt715.h
index 6e37bf64e12f..a0c56aa1003a 100644
--- a/sound/soc/codecs/rt715.h
+++ b/sound/soc/codecs/rt715.h
@@ -220,8 +220,5 @@ int rt715_io_init(struct device *dev, struct sdw_slave *slave);
int rt715_init(struct device *dev, struct regmap *sdw_regmap,
struct regmap *regmap, struct sdw_slave *slave);
-int hda_to_sdw(unsigned int nid, unsigned int verb, unsigned int payload,
- unsigned int *sdw_addr_h, unsigned int *sdw_data_h,
- unsigned int *sdw_addr_l, unsigned int *sdw_data_l);
int rt715_clock_config(struct device *dev);
#endif /* __RT715_H__ */
diff --git a/sound/soc/codecs/sma1307.c b/sound/soc/codecs/sma1307.c
index f2cea6186d98..480bcea48541 100644
--- a/sound/soc/codecs/sma1307.c
+++ b/sound/soc/codecs/sma1307.c
@@ -2011,8 +2011,8 @@ static void sma1307_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id sma1307_i2c_id[] = {
- { "sma1307a", 0 },
- { "sma1307aq", 0 },
+ { "sma1307a" },
+ { "sma1307aq" },
{ }
};
diff --git a/sound/soc/codecs/ssm2602-i2c.c b/sound/soc/codecs/ssm2602-i2c.c
index 596096466cd4..49c74cba17c7 100644
--- a/sound/soc/codecs/ssm2602-i2c.c
+++ b/sound/soc/codecs/ssm2602-i2c.c
@@ -13,8 +13,6 @@
#include "ssm2602.h"
-static const struct i2c_device_id ssm2602_i2c_id[];
-
/*
* ssm2602 2 wire address is determined by GPIO5
* state during powerup.
@@ -23,8 +21,7 @@ static const struct i2c_device_id ssm2602_i2c_id[];
*/
static int ssm2602_i2c_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_match_id(ssm2602_i2c_id, client);
- return ssm2602_probe(&client->dev, id->driver_data,
+ return ssm2602_probe(&client->dev, (uintptr_t)i2c_get_match_data(client),
devm_regmap_init_i2c(client, &ssm2602_regmap_config));
}
diff --git a/sound/soc/codecs/tas2562.c b/sound/soc/codecs/tas2562.c
index 54561ae598b8..fef7ce39f664 100644
--- a/sound/soc/codecs/tas2562.c
+++ b/sound/soc/codecs/tas2562.c
@@ -731,16 +731,14 @@ static int tas2562_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct tas2562_data *data;
int ret;
- const struct i2c_device_id *id;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- id = i2c_match_id(tas2562_id, client);
data->client = client;
data->dev = &client->dev;
- data->model_id = id->driver_data;
+ data->model_id = (uintptr_t)i2c_get_match_data(client);
tas2562_parse_dt(data);
diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
index 728bf78ae71f..a730ab6ad4e3 100644
--- a/sound/soc/codecs/tas2781-i2c.c
+++ b/sound/soc/codecs/tas2781-i2c.c
@@ -489,14 +489,11 @@ static int tas2563_calib_start_put(struct snd_kcontrol *kcontrol,
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tasdevice_priv *tas_priv = snd_soc_component_get_drvdata(comp);
const int sum = ARRAY_SIZE(tas2563_cali_start_reg);
- int rc = 1;
int i, j;
guard(mutex)(&tas_priv->codec_lock);
- if (tas_priv->chip_id != TAS2563) {
- rc = -1;
- goto out;
- }
+ if (tas_priv->chip_id != TAS2563)
+ return -1;
for (i = 0; i < tas_priv->ndev; i++) {
struct tasdevice *tasdev = tas_priv->tasdevice;
@@ -523,8 +520,8 @@ static int tas2563_calib_start_put(struct snd_kcontrol *kcontrol,
q[j].val, 4);
}
}
-out:
- return rc;
+
+ return 1;
}
static void tas2563_calib_stop_put(struct tasdevice_priv *tas_priv)
@@ -576,7 +573,7 @@ static int tasdev_cali_data_put(struct snd_kcontrol *kcontrol,
struct cali_reg *p = &cali_data->cali_reg_array;
unsigned char *src = ucontrol->value.bytes.data;
unsigned char *dst = cali_data->data;
- int rc = 1, i = 0;
+ int i = 0;
int j;
guard(mutex)(&priv->codec_lock);
@@ -605,7 +602,7 @@ static int tasdev_cali_data_put(struct snd_kcontrol *kcontrol,
i += 3;
memcpy(dst, &src[i], cali_data->total_sz);
- return rc;
+ return 1;
}
static int tas2781_latch_reg_get(struct snd_kcontrol *kcontrol,
@@ -1115,25 +1112,21 @@ static int tasdevice_dsp_create_ctrls(struct tasdevice_priv *tas_priv)
char *conf_name, *prog_name;
int nr_controls = 4;
int mix_index = 0;
- int ret;
/* Alloc kcontrol via devm_kzalloc, which don't manually
* free the kcontrol
*/
dsp_ctrls = devm_kcalloc(tas_priv->dev, nr_controls,
sizeof(dsp_ctrls[0]), GFP_KERNEL);
- if (!dsp_ctrls) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!dsp_ctrls)
+ return -ENOMEM;
/* Create mixer items for selecting the active Program and Config */
prog_name = devm_kstrdup(tas_priv->dev, "Speaker Program Id",
GFP_KERNEL);
- if (!prog_name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!prog_name)
+ return -ENOMEM;
+
dsp_ctrls[mix_index].name = prog_name;
dsp_ctrls[mix_index].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
dsp_ctrls[mix_index].info = tasdevice_info_programs;
@@ -1143,10 +1136,9 @@ static int tasdevice_dsp_create_ctrls(struct tasdevice_priv *tas_priv)
conf_name = devm_kstrdup(tas_priv->dev, "Speaker Config Id",
GFP_KERNEL);
- if (!conf_name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!conf_name)
+ return -ENOMEM;
+
dsp_ctrls[mix_index].name = conf_name;
dsp_ctrls[mix_index].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
dsp_ctrls[mix_index].info = tasdevice_info_configurations;
@@ -1156,10 +1148,9 @@ static int tasdevice_dsp_create_ctrls(struct tasdevice_priv *tas_priv)
active_dev_num = devm_kstrdup(tas_priv->dev, "Activate Tasdevice Num",
GFP_KERNEL);
- if (!active_dev_num) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!active_dev_num)
+ return -ENOMEM;
+
dsp_ctrls[mix_index].name = active_dev_num;
dsp_ctrls[mix_index].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
dsp_ctrls[mix_index].info = tasdevice_info_active_num;
@@ -1168,21 +1159,17 @@ static int tasdevice_dsp_create_ctrls(struct tasdevice_priv *tas_priv)
mix_index++;
chip_id = devm_kstrdup(tas_priv->dev, "Tasdevice Chip Id", GFP_KERNEL);
- if (!chip_id) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!chip_id)
+ return -ENOMEM;
+
dsp_ctrls[mix_index].name = chip_id;
dsp_ctrls[mix_index].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
dsp_ctrls[mix_index].info = tasdevice_info_chip_id;
dsp_ctrls[mix_index].get = tasdevice_get_chip_id;
mix_index++;
- ret = snd_soc_add_component_controls(tas_priv->codec, dsp_ctrls,
+ return snd_soc_add_component_controls(tas_priv->codec, dsp_ctrls,
nr_controls < mix_index ? nr_controls : mix_index);
-
-out:
- return ret;
}
static int tasdevice_create_cali_ctrls(struct tasdevice_priv *priv)
@@ -1469,7 +1456,6 @@ static int tasdevice_hw_params(struct snd_pcm_substream *substream,
unsigned int slot_width;
unsigned int fsrate;
int bclk_rate;
- int rc = 0;
fsrate = params_rate(params);
switch (fsrate) {
@@ -1479,8 +1465,7 @@ static int tasdevice_hw_params(struct snd_pcm_substream *substream,
default:
dev_err(tas_priv->dev, "%s: incorrect sample rate = %u\n",
__func__, fsrate);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
slot_width = params_width(params);
@@ -1493,20 +1478,17 @@ static int tasdevice_hw_params(struct snd_pcm_substream *substream,
default:
dev_err(tas_priv->dev, "%s: incorrect slot width = %u\n",
__func__, slot_width);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
bclk_rate = snd_soc_params_to_bclk(params);
if (bclk_rate < 0) {
dev_err(tas_priv->dev, "%s: incorrect bclk rate = %d\n",
__func__, bclk_rate);
- rc = bclk_rate;
- goto out;
+ return bclk_rate;
}
-out:
- return rc;
+ return 0;
}
static int tasdevice_set_dai_sysclk(struct snd_soc_dai *codec_dai,
@@ -1663,7 +1645,6 @@ static void tasdevice_parse_dt(struct tasdevice_priv *tas_priv)
static int tasdevice_i2c_probe(struct i2c_client *i2c)
{
- const struct i2c_device_id *id = i2c_match_id(tasdevice_id, i2c);
const struct acpi_device_id *acpi_id;
struct tasdevice_priv *tas_priv;
int ret;
@@ -1685,7 +1666,7 @@ static int tasdevice_i2c_probe(struct i2c_client *i2c)
tas_priv->chip_id = acpi_id->driver_data;
tas_priv->isacpi = true;
} else {
- tas_priv->chip_id = id ? id->driver_data : 0;
+ tas_priv->chip_id = (uintptr_t)i2c_get_match_data(i2c);
tas_priv->isacpi = false;
}
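The control-creation hunks above drop the shared "out:" label in favour of direct returns, which is safe because every buffer involved is device-managed and needs no unwinding. A minimal sketch of that pattern, using hypothetical names (example_priv, example_create_ctrls) rather than anything from the driver:

#include <linux/device.h>
#include <sound/soc.h>

/* Hypothetical private data; only what the sketch needs. */
struct example_priv {
        struct device *dev;
        struct snd_soc_component *codec;
};

static int example_ctl_get(struct snd_kcontrol *kcontrol,
                           struct snd_ctl_elem_value *ucontrol)
{
        ucontrol->value.integer.value[0] = 0;
        return 0;
}

static int example_create_ctrls(struct example_priv *priv)
{
        struct snd_kcontrol_new *ctrls;
        char *name;

        ctrls = devm_kcalloc(priv->dev, 1, sizeof(*ctrls), GFP_KERNEL);
        if (!ctrls)
                return -ENOMEM;         /* devm memory is freed by the core */

        name = devm_kstrdup(priv->dev, "Example Switch", GFP_KERNEL);
        if (!name)
                return -ENOMEM;         /* nothing to unwind, just return */

        ctrls[0].name = name;
        ctrls[0].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
        ctrls[0].info = snd_ctl_boolean_mono_info;
        ctrls[0].get = example_ctl_get;

        return snd_soc_add_component_controls(priv->codec, ctrls, 1);
}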
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
index 6dd6c0896eff..f0361822061f 100644
--- a/sound/soc/codecs/tas5720.c
+++ b/sound/soc/codecs/tas5720.c
@@ -43,7 +43,6 @@ static const char * const tas5720_supply_names[] = {
struct tas5720_data {
struct snd_soc_component *component;
struct regmap *regmap;
- struct i2c_client *tas5720_client;
enum tas572x_type devtype;
struct regulator_bulk_data supplies[TAS5720_NUM_SUPPLIES];
struct delayed_work fault_check_work;
@@ -729,7 +728,6 @@ static int tas5720_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct tas5720_data *data;
const struct regmap_config *regmap_config;
- const struct i2c_device_id *id;
int ret;
int i;
@@ -737,11 +735,9 @@ static int tas5720_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- id = i2c_match_id(tas5720_id, client);
- data->tas5720_client = client;
- data->devtype = id->driver_data;
+ data->devtype = (uintptr_t)i2c_get_match_data(client);
- switch (id->driver_data) {
+ switch (data->devtype) {
case TAS5720:
regmap_config = &tas5720_regmap_config;
break;
@@ -774,7 +770,7 @@ static int tas5720_probe(struct i2c_client *client)
dev_set_drvdata(dev, data);
- switch (id->driver_data) {
+ switch (data->devtype) {
case TAS5720:
ret = devm_snd_soc_register_component(&client->dev,
&soc_component_dev_tas5720,
diff --git a/sound/soc/codecs/tlv320adc3xxx.c b/sound/soc/codecs/tlv320adc3xxx.c
index 868e8a91e05b..1a50ff675244 100644
--- a/sound/soc/codecs/tlv320adc3xxx.c
+++ b/sound/soc/codecs/tlv320adc3xxx.c
@@ -1401,7 +1401,6 @@ static int adc3xxx_i2c_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct adc3xxx *adc3xxx = NULL;
- const struct i2c_device_id *id;
int ret;
adc3xxx = devm_kzalloc(dev, sizeof(struct adc3xxx), GFP_KERNEL);
@@ -1466,8 +1465,7 @@ static int adc3xxx_i2c_probe(struct i2c_client *i2c)
i2c_set_clientdata(i2c, adc3xxx);
- id = i2c_match_id(adc3xxx_i2c_id, i2c);
- adc3xxx->type = id->driver_data;
+ adc3xxx->type = (uintptr_t)i2c_get_match_data(i2c);
/* Reset codec chip */
gpiod_set_value_cansleep(adc3xxx->rst_pin, 1);
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index d81ab9c25c29..4b3f9128ec37 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1736,12 +1736,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c)
{
struct aic31xx_priv *aic31xx;
unsigned int micbias_value = MICBIAS_2_0V;
- const struct i2c_device_id *id = i2c_match_id(aic31xx_i2c_id, i2c);
int i, ret;
- dev_dbg(&i2c->dev, "## %s: %s codec_type = %d\n", __func__,
- id->name, (int)id->driver_data);
-
aic31xx = devm_kzalloc(&i2c->dev, sizeof(*aic31xx), GFP_KERNEL);
if (!aic31xx)
return -ENOMEM;
@@ -1758,7 +1754,7 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c)
aic31xx->dev = &i2c->dev;
aic31xx->irq = i2c->irq;
- aic31xx->codec_type = id->driver_data;
+ aic31xx->codec_type = (uintptr_t)i2c_get_match_data(i2c);
dev_set_drvdata(aic31xx->dev, aic31xx);
diff --git a/sound/soc/codecs/tlv320aic3x-i2c.c b/sound/soc/codecs/tlv320aic3x-i2c.c
index bb33fd3dfb4f..0b585925c1ac 100644
--- a/sound/soc/codecs/tlv320aic3x-i2c.c
+++ b/sound/soc/codecs/tlv320aic3x-i2c.c
@@ -31,14 +31,13 @@ static int aic3x_i2c_probe(struct i2c_client *i2c)
{
struct regmap *regmap;
struct regmap_config config;
- const struct i2c_device_id *id = i2c_match_id(aic3x_i2c_id, i2c);
config = aic3x_regmap;
config.reg_bits = 8;
config.val_bits = 8;
regmap = devm_regmap_init_i2c(i2c, &config);
- return aic3x_probe(&i2c->dev, regmap, id->driver_data);
+ return aic3x_probe(&i2c->dev, regmap, (uintptr_t)i2c_get_match_data(i2c));
}
static void aic3x_i2c_remove(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 5bc486283fde..b5472fa1bdda 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -222,7 +222,6 @@ static int tpa6130a2_probe(struct i2c_client *client)
struct tpa6130a2_data *data;
struct tpa6130a2_platform_data *pdata = client->dev.platform_data;
struct device_node *np = client->dev.of_node;
- const struct i2c_device_id *id;
const char *regulator;
unsigned int version;
int ret;
@@ -251,8 +250,7 @@ static int tpa6130a2_probe(struct i2c_client *client)
i2c_set_clientdata(client, data);
- id = i2c_match_id(tpa6130a2_id, client);
- data->id = id->driver_data;
+ data->id = (uintptr_t)i2c_get_match_data(client);
if (data->power_gpio >= 0) {
ret = devm_gpio_request(dev, data->power_gpio,
diff --git a/sound/soc/codecs/uda1342.c b/sound/soc/codecs/uda1342.c
index 3d49a7869948..b0b29012842d 100644
--- a/sound/soc/codecs/uda1342.c
+++ b/sound/soc/codecs/uda1342.c
@@ -319,7 +319,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(uda1342_pm_ops,
uda1342_suspend, uda1342_resume, NULL);
static const struct i2c_device_id uda1342_i2c_id[] = {
- { "uda1342", 0 },
+ { "uda1342" },
{ }
};
MODULE_DEVICE_TABLE(i2c, uda1342_i2c_id);
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index a2521e16c099..7cef43bb2a88 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -159,6 +159,8 @@
{"AMIC MUX" #id, "ADC5", "ADC5"}, \
{"AMIC MUX" #id, "ADC6", "ADC6"}
+#define NUM_CODEC_DAIS 7
+
enum {
WCD9335_RX0 = 0,
WCD9335_RX1,
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 829bf055622a..aef82532f8cf 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -2196,18 +2196,7 @@ static int wm8904_i2c_probe(struct i2c_client *i2c)
return ret;
}
- if (i2c->dev.of_node) {
- const struct of_device_id *match;
-
- match = of_match_node(wm8904_of_match, i2c->dev.of_node);
- if (match == NULL)
- return -EINVAL;
- wm8904->devtype = (uintptr_t)match->data;
- } else {
- const struct i2c_device_id *id =
- i2c_match_id(wm8904_i2c_id, i2c);
- wm8904->devtype = id->driver_data;
- }
+ wm8904->devtype = (uintptr_t)i2c_get_match_data(i2c);
i2c_set_clientdata(i2c, wm8904);
wm8904->pdata = i2c->dev.platform_data;
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index 8606e0752a60..da00db5b0172 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -1166,12 +1166,10 @@ static struct spi_driver wm8985_spi_driver = {
#endif
#if IS_ENABLED(CONFIG_I2C)
-static const struct i2c_device_id wm8985_i2c_id[];
static int wm8985_i2c_probe(struct i2c_client *i2c)
{
struct wm8985_priv *wm8985;
- const struct i2c_device_id *id = i2c_match_id(wm8985_i2c_id, i2c);
int ret;
wm8985 = devm_kzalloc(&i2c->dev, sizeof *wm8985, GFP_KERNEL);
@@ -1180,7 +1178,7 @@ static int wm8985_i2c_probe(struct i2c_client *i2c)
i2c_set_clientdata(i2c, wm8985);
- wm8985->dev_type = id->driver_data;
+ wm8985->dev_type = (uintptr_t)i2c_get_match_data(i2c);
wm8985->regmap = devm_regmap_init_i2c(i2c, &wm8985_regmap);
if (IS_ERR(wm8985->regmap)) {
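The codec hunks above (tas2781, tas5720, tlv320*, tpa6130a2, wm8904, wm8985) all apply the same conversion: instead of searching a driver-local i2c_device_id table with i2c_match_id() (or of_match_node() for DT), the matched driver data is fetched with i2c_get_match_data(), which covers OF, ACPI and legacy I2C matches alike. A minimal sketch of the resulting probe shape, with a hypothetical my_chip driver:

#include <linux/i2c.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>

/* Hypothetical device types; the real drivers use their own enums. */
enum my_chip_type { MY_CHIP_A, MY_CHIP_B };

struct my_chip_priv {
        enum my_chip_type devtype;
};

static int my_chip_probe(struct i2c_client *client)
{
        struct my_chip_priv *priv;

        priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /*
         * i2c_get_match_data() returns the .data/.driver_data of whichever
         * of_device_id, acpi_device_id or i2c_device_id entry matched the
         * client (NULL if none carried data); the uintptr_t cast mirrors
         * the hunks above.
         */
        priv->devtype = (uintptr_t)i2c_get_match_data(client);

        i2c_set_clientdata(client, priv);
        return 0;
}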
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 8e88830e8e57..e5fbf5305ea2 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -8,6 +8,8 @@ config SND_SOC_FSL_ASRC
depends on HAS_DMA
select REGMAP_MMIO
select SND_SOC_GENERIC_DMAENGINE_PCM
+ select SND_COMPRESS_ACCEL
+ select SND_COMPRESS_OFFLOAD
help
Say Y if you want to add Asynchronous Sample Rate Converter (ASRC)
support for the Freescale CPUs.
@@ -29,8 +31,8 @@ config SND_SOC_FSL_SAI
config SND_SOC_FSL_MQS
tristate "Medium Quality Sound (MQS) module support"
depends on SND_SOC_FSL_SAI
+ depends on IMX_SCMI_MISC_DRV || !IMX_SCMI_MISC_DRV
select REGMAP_MMIO
- select IMX_SCMI_MISC_DRV if IMX_SCMI_MISC_EXT !=n
help
Say Y if you want to add Medium Quality Sound (MQS)
support for the Freescale CPUs.
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index ad97244b5cc3..d656a9ab54e3 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_SND_SOC_P1022_RDK) += snd-soc-p1022-rdk.o
# Freescale SSI/DMA/SAI/SPDIF Support
snd-soc-fsl-audmix-y := fsl_audmix.o
snd-soc-fsl-asoc-card-y := fsl-asoc-card.o
-snd-soc-fsl-asrc-y := fsl_asrc.o fsl_asrc_dma.o
+snd-soc-fsl-asrc-y := fsl_asrc.o fsl_asrc_dma.o fsl_asrc_m2m.o
snd-soc-fsl-lpc3xxx-y := lpc3xxx-pcm.o lpc3xxx-i2s.o
snd-soc-fsl-sai-y := fsl_sai.o
snd-soc-fsl-ssi-y := fsl_ssi.o
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 02e1594e8223..2bad9cb1daaf 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -932,7 +932,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
if (!asrc_pdev)
priv->card.num_dapm_routes /= 2;
- if (of_property_read_bool(np, "audio-routing")) {
+ if (of_property_present(np, "audio-routing")) {
ret = snd_soc_of_parse_audio_routing(&priv->card, "audio-routing");
if (ret) {
dev_err(&pdev->dev, "failed to parse audio-routing: %d\n", ret);
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index bd5c46d763c0..677529916dc0 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -1063,6 +1063,139 @@ static int fsl_asrc_get_fifo_addr(u8 dir, enum asrc_pair_index index)
return REG_ASRDx(dir, index);
}
+/* Get sample numbers in FIFO */
+static unsigned int fsl_asrc_get_output_fifo_size(struct fsl_asrc_pair *pair)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ enum asrc_pair_index index = pair->index;
+ u32 val;
+
+ regmap_read(asrc->regmap, REG_ASRFST(index), &val);
+
+ val &= ASRFSTi_OUTPUT_FIFO_MASK;
+
+ return val >> ASRFSTi_OUTPUT_FIFO_SHIFT;
+}
+
+static int fsl_asrc_m2m_prepare(struct fsl_asrc_pair *pair)
+{
+ struct fsl_asrc_pair_priv *pair_priv = pair->private;
+ struct fsl_asrc *asrc = pair->asrc;
+ struct device *dev = &asrc->pdev->dev;
+ struct asrc_config config;
+ int ret;
+
+ /* fill config */
+ config.pair = pair->index;
+ config.channel_num = pair->channels;
+ config.input_sample_rate = pair->rate[IN];
+ config.output_sample_rate = pair->rate[OUT];
+ config.input_format = pair->sample_format[IN];
+ config.output_format = pair->sample_format[OUT];
+ config.inclk = INCLK_NONE;
+ config.outclk = OUTCLK_ASRCK1_CLK;
+
+ pair_priv->config = &config;
+ ret = fsl_asrc_config_pair(pair, true);
+ if (ret) {
+ dev_err(dev, "failed to config pair: %d\n", ret);
+ return ret;
+ }
+
+ pair->first_convert = 1;
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_start(struct fsl_asrc_pair *pair)
+{
+ if (pair->first_convert) {
+ fsl_asrc_start_pair(pair);
+ pair->first_convert = 0;
+ }
+ /*
+ * Clear DMA request during the stall state of ASRC:
+ * During the STALL state the data remaining in the input FIFO never
+ * drops below the input threshold and the output FIFO never rises
+ * above the output threshold, so the DMA requests are cleared.
+ */
+ fsl_asrc_set_watermarks(pair, ASRC_FIFO_THRESHOLD_MIN,
+ ASRC_FIFO_THRESHOLD_MAX);
+
+ /* Update the real input threshold to raise DMA request */
+ fsl_asrc_set_watermarks(pair, ASRC_M2M_INPUTFIFO_WML,
+ ASRC_M2M_OUTPUTFIFO_WML);
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_stop(struct fsl_asrc_pair *pair)
+{
+ if (!pair->first_convert) {
+ fsl_asrc_stop_pair(pair);
+ pair->first_convert = 1;
+ }
+
+ return 0;
+}
+
+/* calculate the output (capture) data length from the input data length and sample rates */
+static int fsl_asrc_m2m_calc_out_len(struct fsl_asrc_pair *pair, int input_buffer_length)
+{
+ unsigned int in_width, out_width;
+ unsigned int channels = pair->channels;
+ unsigned int in_samples, out_samples;
+ unsigned int out_length;
+
+ in_width = snd_pcm_format_physical_width(pair->sample_format[IN]) / 8;
+ out_width = snd_pcm_format_physical_width(pair->sample_format[OUT]) / 8;
+
+ in_samples = input_buffer_length / in_width / channels;
+ out_samples = pair->rate[OUT] * in_samples / pair->rate[IN];
+ out_length = (out_samples - ASRC_OUTPUT_LAST_SAMPLE) * out_width * channels;
+
+ return out_length;
+}
+
+static int fsl_asrc_m2m_get_maxburst(u8 dir, struct fsl_asrc_pair *pair)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ struct fsl_asrc_priv *asrc_priv = asrc->private;
+ int wml = (dir == IN) ? ASRC_M2M_INPUTFIFO_WML : ASRC_M2M_OUTPUTFIFO_WML;
+
+ if (!asrc_priv->soc->use_edma)
+ return wml * pair->channels;
+ else
+ return 1;
+}
+
+static int fsl_asrc_m2m_get_cap(struct fsl_asrc_m2m_cap *cap)
+{
+ cap->fmt_in = FSL_ASRC_FORMATS;
+ cap->fmt_out = FSL_ASRC_FORMATS | SNDRV_PCM_FMTBIT_S8;
+
+ cap->rate_in = supported_asrc_rate;
+ cap->rate_in_count = ARRAY_SIZE(supported_asrc_rate);
+ cap->rate_out = supported_asrc_rate;
+ cap->rate_out_count = ARRAY_SIZE(supported_asrc_rate);
+ cap->chan_min = 1;
+ cap->chan_max = 10;
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_pair_resume(struct fsl_asrc_pair *pair)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ int i;
+
+ for (i = 0; i < pair->channels * 4; i++)
+ regmap_write(asrc->regmap, REG_ASRDI(pair->index), 0);
+
+ pair->first_convert = 1;
+ return 0;
+}
+
static int fsl_asrc_runtime_resume(struct device *dev);
static int fsl_asrc_runtime_suspend(struct device *dev);
@@ -1147,6 +1280,15 @@ static int fsl_asrc_probe(struct platform_device *pdev)
asrc->get_fifo_addr = fsl_asrc_get_fifo_addr;
asrc->pair_priv_size = sizeof(struct fsl_asrc_pair_priv);
+ asrc->m2m_prepare = fsl_asrc_m2m_prepare;
+ asrc->m2m_start = fsl_asrc_m2m_start;
+ asrc->m2m_stop = fsl_asrc_m2m_stop;
+ asrc->get_output_fifo_size = fsl_asrc_get_output_fifo_size;
+ asrc->m2m_calc_out_len = fsl_asrc_m2m_calc_out_len;
+ asrc->m2m_get_maxburst = fsl_asrc_m2m_get_maxburst;
+ asrc->m2m_pair_resume = fsl_asrc_m2m_pair_resume;
+ asrc->m2m_get_cap = fsl_asrc_m2m_get_cap;
+
if (of_device_is_compatible(np, "fsl,imx35-asrc")) {
asrc_priv->clk_map[IN] = input_clk_map_imx35;
asrc_priv->clk_map[OUT] = output_clk_map_imx35;
@@ -1242,6 +1384,12 @@ static int fsl_asrc_probe(struct platform_device *pdev)
goto err_pm_get_sync;
}
+ ret = fsl_asrc_m2m_init(asrc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init m2m device %d\n", ret);
+ return ret;
+ }
+
return 0;
err_pm_get_sync:
@@ -1254,6 +1402,10 @@ err_pm_disable:
static void fsl_asrc_remove(struct platform_device *pdev)
{
+ struct fsl_asrc *asrc = dev_get_drvdata(&pdev->dev);
+
+ fsl_asrc_m2m_exit(asrc);
+
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
fsl_asrc_runtime_suspend(&pdev->dev);
@@ -1355,10 +1507,29 @@ static int fsl_asrc_runtime_suspend(struct device *dev)
return 0;
}
+static int fsl_asrc_suspend(struct device *dev)
+{
+ struct fsl_asrc *asrc = dev_get_drvdata(dev);
+ int ret;
+
+ fsl_asrc_m2m_suspend(asrc);
+ ret = pm_runtime_force_suspend(dev);
+ return ret;
+}
+
+static int fsl_asrc_resume(struct device *dev)
+{
+ struct fsl_asrc *asrc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ fsl_asrc_m2m_resume(asrc);
+ return ret;
+}
+
static const struct dev_pm_ops fsl_asrc_pm = {
- SET_RUNTIME_PM_OPS(fsl_asrc_runtime_suspend, fsl_asrc_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ RUNTIME_PM_OPS(fsl_asrc_runtime_suspend, fsl_asrc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(fsl_asrc_suspend, fsl_asrc_resume)
};
static const struct fsl_asrc_soc_data fsl_asrc_imx35_data = {
@@ -1396,7 +1567,7 @@ static struct platform_driver fsl_asrc_driver = {
.driver = {
.name = "fsl-asrc",
.of_match_table = fsl_asrc_ids,
- .pm = &fsl_asrc_pm,
+ .pm = pm_ptr(&fsl_asrc_pm),
},
};
module_platform_driver(fsl_asrc_driver);
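For the output-length estimate added in fsl_asrc_m2m_calc_out_len() above, a standalone arithmetic sketch may help; LAST_SAMPLE_MARGIN stands in for ASRC_OUTPUT_LAST_SAMPLE, whose real value lives in fsl_asrc.h and is assumed here:

#include <stdio.h>

#define LAST_SAMPLE_MARGIN 8    /* assumed value, for illustration only */

static unsigned int calc_out_len(unsigned int in_len, unsigned int in_width,
                                 unsigned int out_width, unsigned int channels,
                                 unsigned int rate_in, unsigned int rate_out)
{
        unsigned int in_samples = in_len / in_width / channels;
        unsigned int out_samples =
                (unsigned long long)rate_out * in_samples / rate_in;

        return (out_samples - LAST_SAMPLE_MARGIN) * out_width * channels;
}

int main(void)
{
        /*
         * 4096 bytes of S16_LE stereo at 48 kHz converted to 44.1 kHz:
         * 1024 frames in, 940 frames out before the last-sample margin,
         * so the expected capture length is (940 - 8) * 2 * 2 = 3728 bytes.
         */
        printf("%u\n", calc_out_len(4096, 2, 2, 2, 48000, 44100));
        return 0;
}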
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index 86d2422ad606..1c492eb237f5 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -12,6 +12,8 @@
#include "fsl_asrc_common.h"
+#define ASRC_M2M_INPUTFIFO_WML 0x4
+#define ASRC_M2M_OUTPUTFIFO_WML 0x2
#define ASRC_DMA_BUFFER_NUM 2
#define ASRC_INPUTFIFO_THRESHOLD 32
#define ASRC_OUTPUTFIFO_THRESHOLD 32
diff --git a/sound/soc/fsl/fsl_asrc_common.h b/sound/soc/fsl/fsl_asrc_common.h
index 7e1c13ca37f1..0cd595b0f629 100644
--- a/sound/soc/fsl/fsl_asrc_common.h
+++ b/sound/soc/fsl/fsl_asrc_common.h
@@ -22,6 +22,26 @@ enum asrc_pair_index {
#define PAIR_CTX_NUM 0x4
/**
+ * struct fsl_asrc_m2m_cap - capability data
+ * @fmt_in: input sample format
+ * @fmt_out: output sample format
+ * @chan_min: minimum channel number
+ * @chan_max: maximum channel number
+ * @rate_in: supported input sample rates
+ * @rate_in_count: number of supported input sample rates
+ * @rate_out: supported output sample rates
+ * @rate_out_count: number of supported output sample rates
+ */
+struct fsl_asrc_m2m_cap {
+ u64 fmt_in;
+ u64 fmt_out;
+ int chan_min;
+ int chan_max;
+ const unsigned int *rate_in;
+ int rate_in_count;
+ const unsigned int *rate_out;
+ int rate_out_count;
+};
+
+/**
* fsl_asrc_pair: ASRC Pair common data
*
* @asrc: pointer to its parent module
@@ -34,6 +54,14 @@ enum asrc_pair_index {
* @pos: hardware pointer position
* @req_dma_chan: flag to release dev_to_dev chan
* @private: pair private area
+ * @complete: dma task complete
+ * @sample_format: format of m2m
+ * @rate: rate of m2m
+ * @buf_len: buffer length of m2m
+ * @dma_buffer: buffer pointers
+ * @first_convert: start of conversion
+ * @ratio_mod_flag: flag for new ratio modifier
+ * @ratio_mod: ratio modification
*/
struct fsl_asrc_pair {
struct fsl_asrc *asrc;
@@ -49,6 +77,16 @@ struct fsl_asrc_pair {
bool req_dma_chan;
void *private;
+
+ /* used for m2m */
+ struct completion complete[2];
+ snd_pcm_format_t sample_format[2];
+ unsigned int rate[2];
+ unsigned int buf_len[2];
+ struct snd_dma_buffer dma_buffer[2];
+ unsigned int first_convert;
+ bool ratio_mod_flag;
+ unsigned int ratio_mod;
};
/**
@@ -62,6 +100,7 @@ struct fsl_asrc_pair {
* @mem_clk: clock source to access register
* @ipg_clk: clock source to drive peripheral
* @spba_clk: SPBA clock (optional, depending on SoC design)
+ * @card: compress sound card
* @lock: spin lock for resource protection
* @pair: pair pointers
* @channel_avail: non-occupied channel numbers
@@ -72,6 +111,17 @@ struct fsl_asrc_pair {
* @request_pair: function pointer
* @release_pair: function pointer
* @get_fifo_addr: function pointer
+ * @m2m_get_cap: function pointer
+ * @m2m_prepare: function pointer
+ * @m2m_start: function pointer
+ * @m2m_unprepare: function pointer
+ * @m2m_stop: function pointer
+ * @m2m_calc_out_len: function pointer
+ * @m2m_get_maxburst: function pointer
+ * @m2m_pair_suspend: function pointer
+ * @m2m_pair_resume: function pointer
+ * @m2m_set_ratio_mod: function pointer
+ * @get_output_fifo_size: function pointer
* @pair_priv_size: size of pair private struct.
* @private: private data structure
*/
@@ -84,6 +134,7 @@ struct fsl_asrc {
struct clk *mem_clk;
struct clk *ipg_clk;
struct clk *spba_clk;
+ struct snd_card *card;
spinlock_t lock; /* spin lock for resource protection */
struct fsl_asrc_pair *pair[PAIR_CTX_NUM];
@@ -97,6 +148,20 @@ struct fsl_asrc {
int (*request_pair)(int channels, struct fsl_asrc_pair *pair);
void (*release_pair)(struct fsl_asrc_pair *pair);
int (*get_fifo_addr)(u8 dir, enum asrc_pair_index index);
+ int (*m2m_get_cap)(struct fsl_asrc_m2m_cap *cap);
+
+ int (*m2m_prepare)(struct fsl_asrc_pair *pair);
+ int (*m2m_start)(struct fsl_asrc_pair *pair);
+ int (*m2m_unprepare)(struct fsl_asrc_pair *pair);
+ int (*m2m_stop)(struct fsl_asrc_pair *pair);
+
+ int (*m2m_calc_out_len)(struct fsl_asrc_pair *pair, int input_buffer_length);
+ int (*m2m_get_maxburst)(u8 dir, struct fsl_asrc_pair *pair);
+ int (*m2m_pair_suspend)(struct fsl_asrc_pair *pair);
+ int (*m2m_pair_resume)(struct fsl_asrc_pair *pair);
+ int (*m2m_set_ratio_mod)(struct fsl_asrc_pair *pair, int val);
+
+ unsigned int (*get_output_fifo_size)(struct fsl_asrc_pair *pair);
size_t pair_priv_size;
void *private;
@@ -105,4 +170,9 @@ struct fsl_asrc {
#define DRV_NAME "fsl-asrc-dai"
extern struct snd_soc_component_driver fsl_asrc_component;
+int fsl_asrc_m2m_init(struct fsl_asrc *asrc);
+void fsl_asrc_m2m_exit(struct fsl_asrc *asrc);
+int fsl_asrc_m2m_resume(struct fsl_asrc *asrc);
+int fsl_asrc_m2m_suspend(struct fsl_asrc *asrc);
+
#endif /* _FSL_ASRC_COMMON_H */
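fsl_asrc_m2m_cap above expresses the supported formats as 64-bit SNDRV_PCM_FMTBIT_* masks, so validating a requested format reduces to a single AND against the mask (as the set_params path below does). A minimal sketch, with a hypothetical helper name:

#include <sound/pcm.h>

/* Return true if @fmt is one of the formats advertised in @fmt_mask. */
static bool example_format_supported(snd_pcm_format_t fmt, u64 fmt_mask)
{
        return (pcm_format_to_bits(fmt) & fmt_mask) != 0;
}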
diff --git a/sound/soc/fsl/fsl_asrc_m2m.c b/sound/soc/fsl/fsl_asrc_m2m.c
new file mode 100644
index 000000000000..4906843e2a8f
--- /dev/null
+++ b/sound/soc/fsl/fsl_asrc_m2m.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+// Copyright (C) 2019-2024 NXP
+//
+// Freescale ASRC Memory to Memory (M2M) driver
+
+#include <linux/dma/imx-dma.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <sound/asound.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/initval.h>
+
+#include "fsl_asrc_common.h"
+
+#define DIR_STR(dir) (dir) == IN ? "in" : "out"
+
+#define ASRC_xPUT_DMA_CALLBACK(dir) \
+ (((dir) == IN) ? asrc_input_dma_callback \
+ : asrc_output_dma_callback)
+
+/* Maximum output and capture buffer size */
+#define ASRC_M2M_BUFFER_SIZE (512 * 1024)
+
+/* Maximum output and capture period size */
+#define ASRC_M2M_PERIOD_SIZE (48 * 1024)
+
+/* dma complete callback */
+static void asrc_input_dma_callback(void *data)
+{
+ struct fsl_asrc_pair *pair = (struct fsl_asrc_pair *)data;
+
+ complete(&pair->complete[IN]);
+}
+
+/* dma complete callback */
+static void asrc_output_dma_callback(void *data)
+{
+ struct fsl_asrc_pair *pair = (struct fsl_asrc_pair *)data;
+
+ complete(&pair->complete[OUT]);
+}
+
+/**
+ * asrc_read_last_fifo - read all the remaining data from the FIFO
+ * @pair: structure pointer of fsl_asrc_pair
+ * @dma_vaddr: virtual address of the capture buffer
+ * @length: payload length of the capture buffer
+ */
+static void asrc_read_last_fifo(struct fsl_asrc_pair *pair, void *dma_vaddr, u32 *length)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ enum asrc_pair_index index = pair->index;
+ u32 i, reg, size, t_size = 0, width;
+ u32 *reg32 = NULL;
+ u16 *reg16 = NULL;
+ u8 *reg24 = NULL;
+
+ width = snd_pcm_format_physical_width(pair->sample_format[OUT]);
+ if (width == 32)
+ reg32 = dma_vaddr + *length;
+ else if (width == 16)
+ reg16 = dma_vaddr + *length;
+ else
+ reg24 = dma_vaddr + *length;
+retry:
+ size = asrc->get_output_fifo_size(pair);
+ if (size + *length > ASRC_M2M_BUFFER_SIZE)
+ goto end;
+
+ for (i = 0; i < size * pair->channels; i++) {
+ regmap_read(asrc->regmap, asrc->get_fifo_addr(OUT, index), &reg);
+ if (reg32) {
+ *reg32++ = reg;
+ } else if (reg16) {
+ *reg16++ = (u16)reg;
+ } else {
+ *reg24++ = (u8)reg;
+ *reg24++ = (u8)(reg >> 8);
+ *reg24++ = (u8)(reg >> 16);
+ }
+ }
+ t_size += size;
+
+ /* In case there is data left in FIFO */
+ if (size)
+ goto retry;
+end:
+ /* Update payload length */
+ if (reg32)
+ *length += t_size * pair->channels * 4;
+ else if (reg16)
+ *length += t_size * pair->channels * 2;
+ else
+ *length += t_size * pair->channels * 3;
+}
+
+/* config dma channel */
+static int asrc_dmaconfig(struct fsl_asrc_pair *pair,
+ struct dma_chan *chan,
+ u32 dma_addr, dma_addr_t buf_addr, u32 buf_len,
+ int dir, int width)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ struct device *dev = &asrc->pdev->dev;
+ struct dma_slave_config slave_config;
+ enum dma_slave_buswidth buswidth;
+ unsigned int sg_len, max_period_size;
+ struct scatterlist *sg;
+ int ret, i;
+
+ switch (width) {
+ case 8:
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case 16:
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 24:
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ break;
+ case 32:
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ default:
+ dev_err(dev, "invalid word width\n");
+ return -EINVAL;
+ }
+
+ memset(&slave_config, 0, sizeof(slave_config));
+ if (dir == IN) {
+ slave_config.direction = DMA_MEM_TO_DEV;
+ slave_config.dst_addr = dma_addr;
+ slave_config.dst_addr_width = buswidth;
+ slave_config.dst_maxburst = asrc->m2m_get_maxburst(IN, pair);
+ } else {
+ slave_config.direction = DMA_DEV_TO_MEM;
+ slave_config.src_addr = dma_addr;
+ slave_config.src_addr_width = buswidth;
+ slave_config.src_maxburst = asrc->m2m_get_maxburst(OUT, pair);
+ }
+
+ ret = dmaengine_slave_config(chan, &slave_config);
+ if (ret) {
+ dev_err(dev, "failed to config dmaengine for %s task: %d\n",
+ DIR_STR(dir), ret);
+ return -EINVAL;
+ }
+
+ max_period_size = rounddown(ASRC_M2M_PERIOD_SIZE, width * pair->channels / 8);
+ /* scatter gather mode */
+ sg_len = buf_len / max_period_size;
+ if (buf_len % max_period_size)
+ sg_len += 1;
+
+ sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return -ENOMEM;
+
+ sg_init_table(sg, sg_len);
+ for (i = 0; i < (sg_len - 1); i++) {
+ sg_dma_address(&sg[i]) = buf_addr + i * max_period_size;
+ sg_dma_len(&sg[i]) = max_period_size;
+ }
+ sg_dma_address(&sg[i]) = buf_addr + i * max_period_size;
+ sg_dma_len(&sg[i]) = buf_len - i * max_period_size;
+
+ pair->desc[dir] = dmaengine_prep_slave_sg(chan, sg, sg_len,
+ slave_config.direction,
+ DMA_PREP_INTERRUPT);
+ kfree(sg);
+ if (!pair->desc[dir]) {
+ dev_err(dev, "failed to prepare dmaengine for %s task\n", DIR_STR(dir));
+ return -EINVAL;
+ }
+
+ pair->desc[dir]->callback = ASRC_xPUT_DMA_CALLBACK(dir);
+ pair->desc[dir]->callback_param = pair;
+
+ return 0;
+}
+
+/* main function of converter */
+static void asrc_m2m_device_run(struct fsl_asrc_pair *pair, struct snd_compr_task_runtime *task)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ struct device *dev = &asrc->pdev->dev;
+ enum asrc_pair_index index = pair->index;
+ struct snd_dma_buffer *src_buf, *dst_buf;
+ unsigned int in_buf_len;
+ unsigned int out_dma_len;
+ unsigned int width;
+ u32 fifo_addr;
+ int ret;
+
+ /* set ratio mod */
+ if (asrc->m2m_set_ratio_mod) {
+ if (pair->ratio_mod_flag) {
+ asrc->m2m_set_ratio_mod(pair, pair->ratio_mod);
+ pair->ratio_mod_flag = false;
+ }
+ }
+
+ src_buf = &pair->dma_buffer[IN];
+ dst_buf = &pair->dma_buffer[OUT];
+
+ width = snd_pcm_format_physical_width(pair->sample_format[IN]);
+ fifo_addr = asrc->paddr + asrc->get_fifo_addr(IN, index);
+
+ in_buf_len = task->input_size;
+
+ if (in_buf_len < width * pair->channels / 8 ||
+ in_buf_len > ASRC_M2M_BUFFER_SIZE ||
+ in_buf_len % (width * pair->channels / 8)) {
+ dev_err(dev, "out buffer size is error: [%d]\n", in_buf_len);
+ goto end;
+ }
+
+ /* dma config for output dma channel */
+ ret = asrc_dmaconfig(pair,
+ pair->dma_chan[IN],
+ fifo_addr,
+ src_buf->addr,
+ in_buf_len, IN, width);
+ if (ret) {
+ dev_err(dev, "out dma config error\n");
+ goto end;
+ }
+
+ width = snd_pcm_format_physical_width(pair->sample_format[OUT]);
+ fifo_addr = asrc->paddr + asrc->get_fifo_addr(OUT, index);
+ out_dma_len = asrc->m2m_calc_out_len(pair, in_buf_len);
+ if (out_dma_len > 0 && out_dma_len <= ASRC_M2M_BUFFER_SIZE) {
+ /* dma config for capture dma channel */
+ ret = asrc_dmaconfig(pair,
+ pair->dma_chan[OUT],
+ fifo_addr,
+ dst_buf->addr,
+ out_dma_len, OUT, width);
+ if (ret) {
+ dev_err(dev, "cap dma config error\n");
+ goto end;
+ }
+ } else if (out_dma_len > ASRC_M2M_BUFFER_SIZE) {
+ dev_err(dev, "cap buffer size error\n");
+ goto end;
+ }
+
+ reinit_completion(&pair->complete[IN]);
+ reinit_completion(&pair->complete[OUT]);
+
+ /* Submit DMA request */
+ dmaengine_submit(pair->desc[IN]);
+ dma_async_issue_pending(pair->desc[IN]->chan);
+ if (out_dma_len > 0) {
+ dmaengine_submit(pair->desc[OUT]);
+ dma_async_issue_pending(pair->desc[OUT]->chan);
+ }
+
+ asrc->m2m_start(pair);
+
+ if (!wait_for_completion_interruptible_timeout(&pair->complete[IN], 10 * HZ)) {
+ dev_err(dev, "out DMA task timeout\n");
+ goto end;
+ }
+
+ if (out_dma_len > 0) {
+ if (!wait_for_completion_interruptible_timeout(&pair->complete[OUT], 10 * HZ)) {
+ dev_err(dev, "cap DMA task timeout\n");
+ goto end;
+ }
+ }
+
+ /* read the last words from FIFO */
+ asrc_read_last_fifo(pair, dst_buf->area, &out_dma_len);
+ /* update payload length for capture */
+ task->output_size = out_dma_len;
+end:
+ return;
+}
+
+static int fsl_asrc_m2m_comp_open(struct snd_compr_stream *stream)
+{
+ struct fsl_asrc *asrc = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct device *dev = &asrc->pdev->dev;
+ struct fsl_asrc_pair *pair;
+ int size, ret;
+
+ pair = kzalloc(sizeof(*pair) + asrc->pair_priv_size, GFP_KERNEL);
+ if (!pair)
+ return -ENOMEM;
+
+ pair->private = (void *)pair + sizeof(struct fsl_asrc_pair);
+ pair->asrc = asrc;
+
+ init_completion(&pair->complete[IN]);
+ init_completion(&pair->complete[OUT]);
+
+ runtime->private_data = pair;
+
+ size = ASRC_M2M_BUFFER_SIZE;
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, &pair->dma_buffer[IN]);
+ if (ret)
+ goto error_alloc_in_buf;
+
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, &pair->dma_buffer[OUT]);
+ if (ret)
+ goto error_alloc_out_buf;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power up asrc\n");
+ goto err_pm_runtime;
+ }
+
+ return 0;
+
+err_pm_runtime:
+ snd_dma_free_pages(&pair->dma_buffer[OUT]);
+error_alloc_out_buf:
+ snd_dma_free_pages(&pair->dma_buffer[IN]);
+error_alloc_in_buf:
+ kfree(pair);
+ return ret;
+}
+
+static int fsl_asrc_m2m_comp_release(struct snd_compr_stream *stream)
+{
+ struct fsl_asrc *asrc = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct fsl_asrc_pair *pair = runtime->private_data;
+ struct device *dev = &asrc->pdev->dev;
+
+ pm_runtime_put_sync(dev);
+
+ snd_dma_free_pages(&pair->dma_buffer[IN]);
+ snd_dma_free_pages(&pair->dma_buffer[OUT]);
+
+ kfree(runtime->private_data);
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_comp_set_params(struct snd_compr_stream *stream,
+ struct snd_compr_params *params)
+{
+ struct fsl_asrc *asrc = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct fsl_asrc_pair *pair = runtime->private_data;
+ struct fsl_asrc_m2m_cap cap;
+ int ret, i;
+
+ ret = asrc->m2m_get_cap(&cap);
+ if (ret)
+ return -EINVAL;
+
+ if (pcm_format_to_bits((__force snd_pcm_format_t)params->codec.format) & cap.fmt_in)
+ pair->sample_format[IN] = (__force snd_pcm_format_t)params->codec.format;
+ else
+ return -EINVAL;
+
+ if (pcm_format_to_bits((__force snd_pcm_format_t)params->codec.pcm_format) & cap.fmt_out)
+ pair->sample_format[OUT] = (__force snd_pcm_format_t)params->codec.pcm_format;
+ else
+ return -EINVAL;
+
+ /* check input rate is in scope */
+ for (i = 0; i < cap.rate_in_count; i++)
+ if (params->codec.sample_rate == cap.rate_in[i]) {
+ pair->rate[IN] = params->codec.sample_rate;
+ break;
+ }
+ if (i == cap.rate_in_count)
+ return -EINVAL;
+
+ /* check output rate is in scope */
+ for (i = 0; i < cap.rate_out_count; i++)
+ if (params->codec.options.src_d.out_sample_rate == cap.rate_out[i]) {
+ pair->rate[OUT] = params->codec.options.src_d.out_sample_rate;
+ break;
+ }
+ if (i == cap.rate_out_count)
+ return -EINVAL;
+
+ if (params->codec.ch_in != params->codec.ch_out ||
+ params->codec.ch_in < cap.chan_min ||
+ params->codec.ch_in > cap.chan_max)
+ return -EINVAL;
+
+ pair->channels = params->codec.ch_in;
+ pair->buf_len[IN] = params->buffer.fragment_size;
+ pair->buf_len[OUT] = params->buffer.fragment_size;
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct snd_dma_buffer *dmab = dmabuf->priv;
+
+ return snd_dma_buffer_mmap(dmab, vma);
+}
+
+static struct sg_table *fsl_asrc_m2m_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct snd_dma_buffer *dmab = attachment->dmabuf->priv;
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return NULL;
+
+ if (dma_get_sgtable(attachment->dev, sgt, dmab->area, dmab->addr, dmab->bytes) < 0)
+ goto free;
+
+ if (dma_map_sgtable(attachment->dev, sgt, direction, 0))
+ goto free;
+
+ return sgt;
+
+free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return NULL;
+}
+
+static void fsl_asrc_m2m_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static void fsl_asrc_m2m_release(struct dma_buf *dmabuf)
+{
+ /* buffer is released by fsl_asrc_m2m_comp_release() */
+}
+
+static const struct dma_buf_ops fsl_asrc_m2m_dma_buf_ops = {
+ .mmap = fsl_asrc_m2m_mmap,
+ .map_dma_buf = fsl_asrc_m2m_map_dma_buf,
+ .unmap_dma_buf = fsl_asrc_m2m_unmap_dma_buf,
+ .release = fsl_asrc_m2m_release,
+};
+
+static int fsl_asrc_m2m_comp_task_create(struct snd_compr_stream *stream,
+ struct snd_compr_task_runtime *task)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info_in);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info_out);
+ struct fsl_asrc *asrc = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct fsl_asrc_pair *pair = runtime->private_data;
+ struct device *dev = &asrc->pdev->dev;
+ int ret;
+
+ exp_info_in.ops = &fsl_asrc_m2m_dma_buf_ops;
+ exp_info_in.size = ASRC_M2M_BUFFER_SIZE;
+ exp_info_in.flags = O_RDWR;
+ exp_info_in.priv = &pair->dma_buffer[IN];
+ task->input = dma_buf_export(&exp_info_in);
+ if (IS_ERR(task->input)) {
+ ret = PTR_ERR(task->input);
+ return ret;
+ }
+
+ exp_info_out.ops = &fsl_asrc_m2m_dma_buf_ops;
+ exp_info_out.size = ASRC_M2M_BUFFER_SIZE;
+ exp_info_out.flags = O_RDWR;
+ exp_info_out.priv = &pair->dma_buffer[OUT];
+ task->output = dma_buf_export(&exp_info_out);
+ if (IS_ERR(task->output)) {
+ ret = PTR_ERR(task->output);
+ return ret;
+ }
+
+ /* Request asrc pair/context */
+ ret = asrc->request_pair(pair->channels, pair);
+ if (ret) {
+ dev_err(dev, "failed to request pair: %d\n", ret);
+ goto err_request_pair;
+ }
+
+ ret = asrc->m2m_prepare(pair);
+ if (ret) {
+ dev_err(dev, "failed to start pair part one: %d\n", ret);
+ goto err_start_part_one;
+ }
+
+ /* Request dma channels */
+ pair->dma_chan[IN] = asrc->get_dma_channel(pair, IN);
+ if (!pair->dma_chan[IN]) {
+ dev_err(dev, "[ctx%d] failed to get input DMA channel\n", pair->index);
+ ret = -EBUSY;
+ goto err_dma_channel_in;
+ }
+
+ pair->dma_chan[OUT] = asrc->get_dma_channel(pair, OUT);
+ if (!pair->dma_chan[OUT]) {
+ dev_err(dev, "[ctx%d] failed to get output DMA channel\n", pair->index);
+ ret = -EBUSY;
+ goto err_dma_channel_out;
+ }
+
+ return 0;
+
+err_dma_channel_out:
+ dma_release_channel(pair->dma_chan[IN]);
+err_dma_channel_in:
+ if (asrc->m2m_unprepare)
+ asrc->m2m_unprepare(pair);
+err_start_part_one:
+ asrc->release_pair(pair);
+err_request_pair:
+ return ret;
+}
+
+static int fsl_asrc_m2m_comp_task_start(struct snd_compr_stream *stream,
+ struct snd_compr_task_runtime *task)
+{
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct fsl_asrc_pair *pair = runtime->private_data;
+
+ asrc_m2m_device_run(pair, task);
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_comp_task_stop(struct snd_compr_stream *stream,
+ struct snd_compr_task_runtime *task)
+{
+ return 0;
+}
+
+static int fsl_asrc_m2m_comp_task_free(struct snd_compr_stream *stream,
+ struct snd_compr_task_runtime *task)
+{
+ struct fsl_asrc *asrc = stream->private_data;
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct fsl_asrc_pair *pair = runtime->private_data;
+
+ /* Stop & release pair/context */
+ if (asrc->m2m_stop)
+ asrc->m2m_stop(pair);
+
+ if (asrc->m2m_unprepare)
+ asrc->m2m_unprepare(pair);
+ asrc->release_pair(pair);
+
+ /* Release dma channel */
+ if (pair->dma_chan[IN])
+ dma_release_channel(pair->dma_chan[IN]);
+ if (pair->dma_chan[OUT])
+ dma_release_channel(pair->dma_chan[OUT]);
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_get_caps(struct snd_compr_stream *cstream,
+ struct snd_compr_caps *caps)
+{
+ caps->num_codecs = 1;
+ caps->min_fragment_size = 4096;
+ caps->max_fragment_size = 4096;
+ caps->min_fragments = 1;
+ caps->max_fragments = 1;
+ caps->codecs[0] = SND_AUDIOCODEC_PCM;
+
+ return 0;
+}
+
+static int fsl_asrc_m2m_fill_codec_caps(struct fsl_asrc *asrc,
+ struct snd_compr_codec_caps *codec)
+{
+ struct fsl_asrc_m2m_cap cap;
+ snd_pcm_format_t k;
+ int j = 0;
+ int ret;
+
+ ret = asrc->m2m_get_cap(&cap);
+ if (ret)
+ return -EINVAL;
+
+ pcm_for_each_format(k) {
+ if (pcm_format_to_bits(k) & cap.fmt_in) {
+ codec->descriptor[j].max_ch = cap.chan_max;
+ memcpy(codec->descriptor[j].sample_rates,
+ cap.rate_in,
+ cap.rate_in_count * sizeof(__u32));
+ codec->descriptor[j].num_sample_rates = cap.rate_in_count;
+ codec->descriptor[j].formats = (__force __u32)k;
+ codec->descriptor[j].pcm_formats = cap.fmt_out;
+ codec->descriptor[j].src.out_sample_rate_min = cap.rate_out[0];
+ codec->descriptor[j].src.out_sample_rate_max =
+ cap.rate_out[cap.rate_out_count - 1];
+ j++;
+ }
+ }
+
+ codec->codec = SND_AUDIOCODEC_PCM;
+ codec->num_descriptors = j;
+ return 0;
+}
+
+static int fsl_asrc_m2m_get_codec_caps(struct snd_compr_stream *stream,
+ struct snd_compr_codec_caps *codec)
+{
+ struct fsl_asrc *asrc = stream->private_data;
+
+ return fsl_asrc_m2m_fill_codec_caps(asrc, codec);
+}
+
+static struct snd_compr_ops fsl_asrc_m2m_compr_ops = {
+ .open = fsl_asrc_m2m_comp_open,
+ .free = fsl_asrc_m2m_comp_release,
+ .set_params = fsl_asrc_m2m_comp_set_params,
+ .get_caps = fsl_asrc_m2m_get_caps,
+ .get_codec_caps = fsl_asrc_m2m_get_codec_caps,
+ .task_create = fsl_asrc_m2m_comp_task_create,
+ .task_start = fsl_asrc_m2m_comp_task_start,
+ .task_stop = fsl_asrc_m2m_comp_task_stop,
+ .task_free = fsl_asrc_m2m_comp_task_free,
+};
+
+int fsl_asrc_m2m_suspend(struct fsl_asrc *asrc)
+{
+ struct fsl_asrc_pair *pair;
+ int i;
+
+ for (i = 0; i < PAIR_CTX_NUM; i++) {
+ pair = asrc->pair[i];
+ if (!pair)
+ continue;
+ if (!completion_done(&pair->complete[IN])) {
+ if (pair->dma_chan[IN])
+ dmaengine_terminate_all(pair->dma_chan[IN]);
+ asrc_input_dma_callback((void *)pair);
+ }
+ if (!completion_done(&pair->complete[OUT])) {
+ if (pair->dma_chan[OUT])
+ dmaengine_terminate_all(pair->dma_chan[OUT]);
+ asrc_output_dma_callback((void *)pair);
+ }
+
+ if (asrc->m2m_pair_suspend)
+ asrc->m2m_pair_suspend(pair);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_asrc_m2m_suspend);
+
+int fsl_asrc_m2m_resume(struct fsl_asrc *asrc)
+{
+ struct fsl_asrc_pair *pair;
+ int i;
+
+ for (i = 0; i < PAIR_CTX_NUM; i++) {
+ pair = asrc->pair[i];
+ if (!pair)
+ continue;
+ if (asrc->m2m_pair_resume)
+ asrc->m2m_pair_resume(pair);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_asrc_m2m_resume);
+
+int fsl_asrc_m2m_init(struct fsl_asrc *asrc)
+{
+ struct device *dev = &asrc->pdev->dev;
+ struct snd_card *card;
+ struct snd_compr *compr;
+ int ret;
+
+ ret = snd_card_new(dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+ THIS_MODULE, 0, &card);
+ if (ret < 0)
+ return ret;
+
+ strscpy(card->driver, "fsl-asrc-m2m", sizeof(card->driver));
+ strscpy(card->shortname, "ASRC-M2M", sizeof(card->shortname));
+ strscpy(card->longname, "ASRC-M2M", sizeof(card->longname));
+
+ asrc->card = card;
+
+ compr = devm_kzalloc(dev, sizeof(*compr), GFP_KERNEL);
+ if (!compr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ compr->ops = &fsl_asrc_m2m_compr_ops;
+ compr->private_data = asrc;
+
+ ret = snd_compress_new(card, 0, SND_COMPRESS_ACCEL, "ASRC M2M", compr);
+ if (ret < 0)
+ goto err;
+
+ ret = snd_card_register(card);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ snd_card_free(card);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fsl_asrc_m2m_init);
+
+void fsl_asrc_m2m_exit(struct fsl_asrc *asrc)
+{
+ struct snd_card *card = asrc->card;
+
+ snd_card_free(card);
+}
+EXPORT_SYMBOL_GPL(fsl_asrc_m2m_exit);
+
+MODULE_IMPORT_NS("DMA_BUF");
+MODULE_AUTHOR("Shengjiu Wang <Shengjiu.Wang@nxp.com>");
+MODULE_DESCRIPTION("Freescale ASRC M2M driver");
+MODULE_LICENSE("GPL");
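asrc_dmaconfig() above slices each task buffer into scatter-gather entries of at most ASRC_M2M_PERIOD_SIZE bytes (rounded down to a whole frame), with the remainder carried by the final entry. A standalone sketch of that slicing, using example sizes only:

#include <stdio.h>

int main(void)
{
        unsigned int buf_len = 100 * 1024;          /* bytes queued by one task */
        unsigned int max_period_size = 48 * 1024;   /* ASRC_M2M_PERIOD_SIZE */
        unsigned int sg_len = buf_len / max_period_size;
        unsigned int i;

        if (buf_len % max_period_size)
                sg_len += 1;

        /* full periods first, remainder in the last entry */
        for (i = 0; i < sg_len - 1; i++)
                printf("sg[%u]: %u bytes\n", i, max_period_size);
        printf("sg[%u]: %u bytes\n", i, buf_len - i * max_period_size);
        return 0;
}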
diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
index d22f0621c465..f404a39009e1 100644
--- a/sound/soc/fsl/fsl_easrc.c
+++ b/sound/soc/fsl/fsl_easrc.c
@@ -1861,6 +1861,224 @@ static int fsl_easrc_get_fifo_addr(u8 dir, enum asrc_pair_index index)
return REG_EASRC_FIFO(dir, index);
}
+/* Get sample numbers in FIFO */
+static unsigned int fsl_easrc_get_output_fifo_size(struct fsl_asrc_pair *pair)
+{
+ struct fsl_asrc *asrc = pair->asrc;
+ enum asrc_pair_index index = pair->index;
+ u32 val;
+
+ regmap_read(asrc->regmap, REG_EASRC_SFS(index), &val);
+ val &= EASRC_SFS_NSGO_MASK;
+
+ return val >> EASRC_SFS_NSGO_SHIFT;
+}
+
+static int fsl_easrc_m2m_prepare(struct fsl_asrc_pair *pair)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv = pair->private;
+ struct fsl_asrc *asrc = pair->asrc;
+ struct device *dev = &asrc->pdev->dev;
+ int ret;
+
+ ctx_priv->in_params.sample_rate = pair->rate[IN];
+ ctx_priv->in_params.sample_format = pair->sample_format[IN];
+ ctx_priv->out_params.sample_rate = pair->rate[OUT];
+ ctx_priv->out_params.sample_format = pair->sample_format[OUT];
+
+ ctx_priv->in_params.fifo_wtmk = FSL_EASRC_INPUTFIFO_WML;
+ ctx_priv->out_params.fifo_wtmk = FSL_EASRC_OUTPUTFIFO_WML;
+ /* Fill the right half of the re-sampler with zeros */
+ ctx_priv->rs_init_mode = 0x2;
+ /* Zero fill the right half of the prefilter */
+ ctx_priv->pf_init_mode = 0x2;
+
+ ret = fsl_easrc_set_ctx_format(pair,
+ &ctx_priv->in_params.sample_format,
+ &ctx_priv->out_params.sample_format);
+ if (ret) {
+ dev_err(dev, "failed to set context format: %d\n", ret);
+ return ret;
+ }
+
+ ret = fsl_easrc_config_context(asrc, pair->index);
+ if (ret) {
+ dev_err(dev, "failed to config context %d\n", ret);
+ return ret;
+ }
+
+ ctx_priv->in_params.iterations = 1;
+ ctx_priv->in_params.group_len = pair->channels;
+ ctx_priv->in_params.access_len = pair->channels;
+ ctx_priv->out_params.iterations = 1;
+ ctx_priv->out_params.group_len = pair->channels;
+ ctx_priv->out_params.access_len = pair->channels;
+
+ ret = fsl_easrc_set_ctx_organziation(pair);
+ if (ret) {
+ dev_err(dev, "failed to set fifo organization\n");
+ return ret;
+ }
+
+ /* The context start flag */
+ pair->first_convert = 1;
+ return 0;
+}
+
+static int fsl_easrc_m2m_start(struct fsl_asrc_pair *pair)
+{
+ /* start context once */
+ if (pair->first_convert) {
+ fsl_easrc_start_context(pair);
+ pair->first_convert = 0;
+ }
+
+ return 0;
+}
+
+static int fsl_easrc_m2m_stop(struct fsl_asrc_pair *pair)
+{
+ /* Stop pair/context */
+ if (!pair->first_convert) {
+ fsl_easrc_stop_context(pair);
+ pair->first_convert = 1;
+ }
+
+ return 0;
+}
+
+/* calculate the output (capture) data length from the input data length and sample rates */
+static int fsl_easrc_m2m_calc_out_len(struct fsl_asrc_pair *pair, int input_buffer_length)
+{
+ struct fsl_asrc *easrc = pair->asrc;
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ struct fsl_easrc_ctx_priv *ctx_priv = pair->private;
+ unsigned int in_rate = ctx_priv->in_params.norm_rate;
+ unsigned int out_rate = ctx_priv->out_params.norm_rate;
+ unsigned int channels = pair->channels;
+ unsigned int in_samples, out_samples;
+ unsigned int in_width, out_width;
+ unsigned int out_length;
+ unsigned int frac_bits;
+ u64 val1, val2;
+
+ switch (easrc_priv->rs_num_taps) {
+ case EASRC_RS_32_TAPS:
+ /* integer bits = 5; */
+ frac_bits = 39;
+ break;
+ case EASRC_RS_64_TAPS:
+ /* integer bits = 6; */
+ frac_bits = 38;
+ break;
+ case EASRC_RS_128_TAPS:
+ /* integer bits = 7; */
+ frac_bits = 37;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val1 = (u64)in_rate << frac_bits;
+ do_div(val1, out_rate);
+ val1 += (s64)ctx_priv->ratio_mod << (frac_bits - 31);
+
+ in_width = snd_pcm_format_physical_width(ctx_priv->in_params.sample_format) / 8;
+ out_width = snd_pcm_format_physical_width(ctx_priv->out_params.sample_format) / 8;
+
+ ctx_priv->in_filled_len += input_buffer_length;
+ if (ctx_priv->in_filled_len <= ctx_priv->in_filled_sample * in_width * channels) {
+ out_length = 0;
+ } else {
+ in_samples = ctx_priv->in_filled_len / (in_width * channels) -
+ ctx_priv->in_filled_sample;
+
+ /* right shift 12 bit to make ratio in 32bit space */
+ val2 = (u64)in_samples << (frac_bits - 12);
+ val1 = val1 >> 12;
+ do_div(val2, val1);
+ out_samples = val2;
+
+ out_length = out_samples * out_width * channels;
+ ctx_priv->in_filled_len = ctx_priv->in_filled_sample * in_width * channels;
+ }
+
+ return out_length;
+}
+
+static int fsl_easrc_m2m_get_maxburst(u8 dir, struct fsl_asrc_pair *pair)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv = pair->private;
+
+ if (dir == IN)
+ return ctx_priv->in_params.fifo_wtmk * pair->channels;
+ else
+ return ctx_priv->out_params.fifo_wtmk * pair->channels;
+}
+
+static int fsl_easrc_m2m_pair_suspend(struct fsl_asrc_pair *pair)
+{
+ fsl_easrc_stop_context(pair);
+
+ return 0;
+}
+
+static int fsl_easrc_m2m_pair_resume(struct fsl_asrc_pair *pair)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv = pair->private;
+
+ pair->first_convert = 1;
+ ctx_priv->in_filled_len = 0;
+
+ return 0;
+}
+
+/* val is Q31 */
+static int fsl_easrc_m2m_set_ratio_mod(struct fsl_asrc_pair *pair, int val)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv = pair->private;
+ struct fsl_asrc *easrc = pair->asrc;
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ unsigned int frac_bits;
+
+ ctx_priv->ratio_mod += val;
+
+ switch (easrc_priv->rs_num_taps) {
+ case EASRC_RS_32_TAPS:
+ /* integer bits = 5; */
+ frac_bits = 39;
+ break;
+ case EASRC_RS_64_TAPS:
+ /* integer bits = 6; */
+ frac_bits = 38;
+ break;
+ case EASRC_RS_128_TAPS:
+ /* integer bits = 7; */
+ frac_bits = 37;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val <<= (frac_bits - 31);
+ regmap_write(easrc->regmap, REG_EASRC_RUC(pair->index), EASRC_RSUC_RS_RM(val));
+
+ return 0;
+}
+
+static int fsl_easrc_m2m_get_cap(struct fsl_asrc_m2m_cap *cap)
+{
+ cap->fmt_in = FSL_EASRC_FORMATS;
+ cap->fmt_out = FSL_EASRC_FORMATS | SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE;
+ cap->rate_in = easrc_rates;
+ cap->rate_in_count = ARRAY_SIZE(easrc_rates);
+ cap->rate_out = easrc_rates;
+ cap->rate_out_count = ARRAY_SIZE(easrc_rates);
+ cap->chan_min = 1;
+ cap->chan_max = 32;
+ return 0;
+}
+
static const struct of_device_id fsl_easrc_dt_ids[] = {
{ .compatible = "fsl,imx8mn-easrc",},
{}
@@ -1926,6 +2144,16 @@ static int fsl_easrc_probe(struct platform_device *pdev)
easrc->release_pair = fsl_easrc_release_context;
easrc->get_fifo_addr = fsl_easrc_get_fifo_addr;
easrc->pair_priv_size = sizeof(struct fsl_easrc_ctx_priv);
+ easrc->m2m_prepare = fsl_easrc_m2m_prepare;
+ easrc->m2m_start = fsl_easrc_m2m_start;
+ easrc->m2m_stop = fsl_easrc_m2m_stop;
+ easrc->get_output_fifo_size = fsl_easrc_get_output_fifo_size;
+ easrc->m2m_calc_out_len = fsl_easrc_m2m_calc_out_len;
+ easrc->m2m_get_maxburst = fsl_easrc_m2m_get_maxburst;
+ easrc->m2m_pair_suspend = fsl_easrc_m2m_pair_suspend;
+ easrc->m2m_pair_resume = fsl_easrc_m2m_pair_resume;
+ easrc->m2m_set_ratio_mod = fsl_easrc_m2m_set_ratio_mod;
+ easrc->m2m_get_cap = fsl_easrc_m2m_get_cap;
easrc_priv->rs_num_taps = EASRC_RS_32_TAPS;
easrc_priv->const_coeff = 0x3FF0000000000000;
@@ -1976,6 +2204,12 @@ static int fsl_easrc_probe(struct platform_device *pdev)
goto err_pm_disable;
}
+ ret = fsl_asrc_m2m_init(easrc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init m2m device %d\n", ret);
+ return ret;
+ }
+
return 0;
err_pm_disable:
@@ -1985,6 +2219,10 @@ err_pm_disable:
static void fsl_easrc_remove(struct platform_device *pdev)
{
+ struct fsl_asrc *easrc = dev_get_drvdata(&pdev->dev);
+
+ fsl_asrc_m2m_exit(easrc);
+
pm_runtime_disable(&pdev->dev);
}
@@ -2085,10 +2323,29 @@ disable_mem_clk:
return ret;
}
+static int fsl_easrc_suspend(struct device *dev)
+{
+ struct fsl_asrc *easrc = dev_get_drvdata(dev);
+ int ret;
+
+ fsl_asrc_m2m_suspend(easrc);
+ ret = pm_runtime_force_suspend(dev);
+ return ret;
+}
+
+static int fsl_easrc_resume(struct device *dev)
+{
+ struct fsl_asrc *easrc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ fsl_asrc_m2m_resume(easrc);
+ return ret;
+}
+
static const struct dev_pm_ops fsl_easrc_pm_ops = {
RUNTIME_PM_OPS(fsl_easrc_runtime_suspend, fsl_easrc_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SYSTEM_SLEEP_PM_OPS(fsl_easrc_suspend, fsl_easrc_resume)
};
static struct platform_driver fsl_easrc_driver = {
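fsl_easrc_m2m_calc_out_len() and fsl_easrc_m2m_set_ratio_mod() above work on a fixed-point resampling ratio whose fractional width depends on the tap count (39, 38 or 37 fractional bits for 32, 64 or 128 taps), with the user-supplied Q31 modifier rescaled to the same format before being added. A standalone sketch of that arithmetic for the 32-tap case:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int in_rate = 44100, out_rate = 48000;
        unsigned int frac_bits = 39;            /* EASRC_RS_32_TAPS */
        int32_t ratio_mod_q31 = 0;              /* optional drift correction */
        uint64_t ratio;

        /* ratio = in_rate / out_rate in Q5.39 */
        ratio = ((uint64_t)in_rate << frac_bits) / out_rate;
        /* rescale the Q31 modifier to the same fractional width */
        ratio += (int64_t)ratio_mod_q31 << (frac_bits - 31);

        printf("ratio = %llu (~%.6f)\n",
               (unsigned long long)ratio,
               ratio / (double)(1ULL << frac_bits));
        return 0;
}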
diff --git a/sound/soc/fsl/fsl_easrc.h b/sound/soc/fsl/fsl_easrc.h
index 7c70dac52713..c9f770862662 100644
--- a/sound/soc/fsl/fsl_easrc.h
+++ b/sound/soc/fsl/fsl_easrc.h
@@ -601,6 +601,8 @@ struct fsl_easrc_slot {
* @out_missed_sample: sample missed in output
* @st1_addexp: exponent added for stage1
* @st2_addexp: exponent added for stage2
+ * @ratio_mod: update ratio
+ * @in_filled_len: input filled length
*/
struct fsl_easrc_ctx_priv {
struct fsl_easrc_io_params in_params;
@@ -618,6 +620,8 @@ struct fsl_easrc_ctx_priv {
int out_missed_sample;
int st1_addexp;
int st2_addexp;
+ int ratio_mod;
+ unsigned int in_filled_len;
};
/**
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index 8c15389c9a04..1075598a6647 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -35,6 +35,15 @@
#define MICFIL_AUDIO_PLL2 1
#define MICFIL_CLK_EXT3 2
+static const unsigned int fsl_micfil_rates[] = {
+ 8000, 11025, 16000, 22050, 32000, 44100, 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list fsl_micfil_rate_constraints = {
+ .count = ARRAY_SIZE(fsl_micfil_rates),
+ .list = fsl_micfil_rates,
+};
+
enum quality {
QUALITY_HIGH,
QUALITY_MEDIUM,
@@ -80,6 +89,7 @@ struct fsl_micfil_soc_data {
bool use_verid;
bool volume_sx;
u64 formats;
+ int fifo_offset;
};
static struct fsl_micfil_soc_data fsl_micfil_imx8mm = {
@@ -89,6 +99,7 @@ static struct fsl_micfil_soc_data fsl_micfil_imx8mm = {
.dataline = 0xf,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.volume_sx = true,
+ .fifo_offset = 0,
};
static struct fsl_micfil_soc_data fsl_micfil_imx8mp = {
@@ -98,6 +109,7 @@ static struct fsl_micfil_soc_data fsl_micfil_imx8mp = {
.dataline = 0xf,
.formats = SNDRV_PCM_FMTBIT_S32_LE,
.volume_sx = false,
+ .fifo_offset = 0,
};
static struct fsl_micfil_soc_data fsl_micfil_imx93 = {
@@ -109,12 +121,26 @@ static struct fsl_micfil_soc_data fsl_micfil_imx93 = {
.use_edma = true,
.use_verid = true,
.volume_sx = false,
+ .fifo_offset = 0,
+};
+
+static struct fsl_micfil_soc_data fsl_micfil_imx943 = {
+ .imx = true,
+ .fifos = 8,
+ .fifo_depth = 32,
+ .dataline = 0xf,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .use_edma = true,
+ .use_verid = true,
+ .volume_sx = false,
+ .fifo_offset = -4,
};
static const struct of_device_id fsl_micfil_dt_ids[] = {
{ .compatible = "fsl,imx8mm-micfil", .data = &fsl_micfil_imx8mm },
{ .compatible = "fsl,imx8mp-micfil", .data = &fsl_micfil_imx8mp },
{ .compatible = "fsl,imx93-micfil", .data = &fsl_micfil_imx93 },
+ { .compatible = "fsl,imx943-micfil", .data = &fsl_micfil_imx943 },
{}
};
MODULE_DEVICE_TABLE(of, fsl_micfil_dt_ids);
@@ -486,29 +512,12 @@ static int fsl_micfil_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct fsl_micfil *micfil = snd_soc_dai_get_drvdata(dai);
- unsigned int rates[MICFIL_NUM_RATES] = {8000, 11025, 16000, 22050, 32000, 44100, 48000};
- int i, j, k = 0;
- u64 clk_rate;
if (!micfil) {
dev_err(dai->dev, "micfil dai priv_data not set\n");
return -EINVAL;
}
- micfil->constraint_rates.list = micfil->constraint_rates_list;
- micfil->constraint_rates.count = 0;
-
- for (j = 0; j < MICFIL_NUM_RATES; j++) {
- for (i = 0; i < MICFIL_CLK_SRC_NUM; i++) {
- clk_rate = clk_get_rate(micfil->clk_src[i]);
- if (clk_rate != 0 && do_div(clk_rate, rates[j]) == 0) {
- micfil->constraint_rates_list[k++] = rates[j];
- micfil->constraint_rates.count++;
- break;
- }
- }
- }
-
if (micfil->constraint_rates.count > 0)
snd_pcm_hw_constraint_list(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
@@ -801,7 +810,7 @@ static int fsl_micfil_hw_params(struct snd_pcm_substream *substream,
ret = regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL2,
MICFIL_CTRL2_CLKDIV | MICFIL_CTRL2_CICOSR,
FIELD_PREP(MICFIL_CTRL2_CLKDIV, clk_div) |
- FIELD_PREP(MICFIL_CTRL2_CICOSR, 16 - osr));
+ FIELD_PREP(MICFIL_CTRL2_CICOSR, 32 - osr));
/* Configure CIC OSR in VADCICOSR */
regmap_update_bits(micfil->regmap, REG_MICFIL_VAD0_CTRL1,
@@ -940,9 +949,39 @@ static const struct reg_default fsl_micfil_reg_defaults[] = {
{REG_MICFIL_VAD0_ZCD, 0x00000004},
};
+static const struct reg_default fsl_micfil_reg_defaults_v2[] = {
+ {REG_MICFIL_CTRL1, 0x00000000},
+ {REG_MICFIL_CTRL2, 0x00000000},
+ {REG_MICFIL_STAT, 0x00000000},
+ {REG_MICFIL_FIFO_CTRL, 0x0000001F},
+ {REG_MICFIL_FIFO_STAT, 0x00000000},
+ {REG_MICFIL_DATACH0 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH1 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH2 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH3 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH4 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH5 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH6 - 0x4, 0x00000000},
+ {REG_MICFIL_DATACH7 - 0x4, 0x00000000},
+ {REG_MICFIL_DC_CTRL, 0x00000000},
+ {REG_MICFIL_OUT_CTRL, 0x00000000},
+ {REG_MICFIL_OUT_STAT, 0x00000000},
+ {REG_MICFIL_VAD0_CTRL1, 0x00000000},
+ {REG_MICFIL_VAD0_CTRL2, 0x000A0000},
+ {REG_MICFIL_VAD0_STAT, 0x00000000},
+ {REG_MICFIL_VAD0_SCONFIG, 0x00000000},
+ {REG_MICFIL_VAD0_NCONFIG, 0x80000000},
+ {REG_MICFIL_VAD0_NDATA, 0x00000000},
+ {REG_MICFIL_VAD0_ZCD, 0x00000004},
+};
+
static bool fsl_micfil_readable_reg(struct device *dev, unsigned int reg)
{
struct fsl_micfil *micfil = dev_get_drvdata(dev);
+ int ofs = micfil->soc->fifo_offset;
+
+ if (reg >= (REG_MICFIL_DATACH0 + ofs) && reg <= (REG_MICFIL_DATACH7 + ofs))
+ return true;
switch (reg) {
case REG_MICFIL_CTRL1:
@@ -950,14 +989,6 @@ static bool fsl_micfil_readable_reg(struct device *dev, unsigned int reg)
case REG_MICFIL_STAT:
case REG_MICFIL_FIFO_CTRL:
case REG_MICFIL_FIFO_STAT:
- case REG_MICFIL_DATACH0:
- case REG_MICFIL_DATACH1:
- case REG_MICFIL_DATACH2:
- case REG_MICFIL_DATACH3:
- case REG_MICFIL_DATACH4:
- case REG_MICFIL_DATACH5:
- case REG_MICFIL_DATACH6:
- case REG_MICFIL_DATACH7:
case REG_MICFIL_DC_CTRL:
case REG_MICFIL_OUT_CTRL:
case REG_MICFIL_OUT_STAT:
@@ -1011,17 +1042,15 @@ static bool fsl_micfil_writeable_reg(struct device *dev, unsigned int reg)
static bool fsl_micfil_volatile_reg(struct device *dev, unsigned int reg)
{
+ struct fsl_micfil *micfil = dev_get_drvdata(dev);
+ int ofs = micfil->soc->fifo_offset;
+
+ if (reg >= (REG_MICFIL_DATACH0 + ofs) && reg <= (REG_MICFIL_DATACH7 + ofs))
+ return true;
+
switch (reg) {
case REG_MICFIL_STAT:
case REG_MICFIL_FIFO_STAT:
- case REG_MICFIL_DATACH0:
- case REG_MICFIL_DATACH1:
- case REG_MICFIL_DATACH2:
- case REG_MICFIL_DATACH3:
- case REG_MICFIL_DATACH4:
- case REG_MICFIL_DATACH5:
- case REG_MICFIL_DATACH6:
- case REG_MICFIL_DATACH7:
case REG_MICFIL_OUT_STAT:
case REG_MICFIL_VERID:
case REG_MICFIL_PARAM:
@@ -1047,6 +1076,20 @@ static const struct regmap_config fsl_micfil_regmap_config = {
.cache_type = REGCACHE_MAPLE,
};
+static const struct regmap_config fsl_micfil_regmap_config_v2 = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+
+ .max_register = REG_MICFIL_VAD0_ZCD,
+ .reg_defaults = fsl_micfil_reg_defaults_v2,
+ .num_reg_defaults = ARRAY_SIZE(fsl_micfil_reg_defaults_v2),
+ .readable_reg = fsl_micfil_readable_reg,
+ .volatile_reg = fsl_micfil_volatile_reg,
+ .writeable_reg = fsl_micfil_writeable_reg,
+ .cache_type = REGCACHE_MAPLE,
+};
+
/* END OF REGMAP */
static irqreturn_t micfil_isr(int irq, void *devid)
@@ -1239,14 +1282,26 @@ static int fsl_micfil_probe(struct platform_device *pdev)
if (IS_ERR(micfil->clk_src[MICFIL_CLK_EXT3]))
micfil->clk_src[MICFIL_CLK_EXT3] = NULL;
+ fsl_asoc_constrain_rates(&micfil->constraint_rates,
+ &fsl_micfil_rate_constraints,
+ micfil->clk_src[MICFIL_AUDIO_PLL1],
+ micfil->clk_src[MICFIL_AUDIO_PLL2],
+ micfil->clk_src[MICFIL_CLK_EXT3],
+ micfil->constraint_rates_list);
+
/* init regmap */
regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
- micfil->regmap = devm_regmap_init_mmio(&pdev->dev,
- regs,
- &fsl_micfil_regmap_config);
+ if (of_device_is_compatible(np, "fsl,imx943-micfil"))
+ micfil->regmap = devm_regmap_init_mmio(&pdev->dev,
+ regs,
+ &fsl_micfil_regmap_config_v2);
+ else
+ micfil->regmap = devm_regmap_init_mmio(&pdev->dev,
+ regs,
+ &fsl_micfil_regmap_config);
if (IS_ERR(micfil->regmap)) {
dev_err(&pdev->dev, "failed to init MICFIL regmap: %ld\n",
PTR_ERR(micfil->regmap));
@@ -1315,7 +1370,7 @@ static int fsl_micfil_probe(struct platform_device *pdev)
}
micfil->dma_params_rx.chan_name = "rx";
- micfil->dma_params_rx.addr = res->start + REG_MICFIL_DATACH0;
+ micfil->dma_params_rx.addr = res->start + REG_MICFIL_DATACH0 + micfil->soc->fifo_offset;
micfil->dma_params_rx.maxburst = MICFIL_DMA_MAXBURST_RX;
platform_set_drvdata(pdev, micfil);
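The rate list that fsl_asoc_constrain_rates() now builds for MICFIL (and for SAI below) follows the logic previously open-coded in fsl_micfil_startup(): a rate is kept only when at least one candidate clock divides it exactly. A standalone sketch with example PLL rates (the real helper reads the candidate clocks with clk_get_rate() and also considers an external clock):

#include <stdio.h>

int main(void)
{
        /* Candidate rates and clocks; the clock rates are examples only. */
        const unsigned int rates[] = { 8000, 11025, 16000, 22050, 32000, 44100, 48000 };
        const unsigned long clks[] = { 24576000, 22579200 };
        unsigned int i, j;

        for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
                for (j = 0; j < sizeof(clks) / sizeof(clks[0]); j++) {
                        if (clks[j] && clks[j] % rates[i] == 0) {
                                printf("keep %u Hz\n", rates[i]);
                                break;
                        }
                }
        }
        return 0;
}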
diff --git a/sound/soc/fsl/fsl_micfil.h b/sound/soc/fsl/fsl_micfil.h
index b7798a7cbf2a..aa3661ea4ffc 100644
--- a/sound/soc/fsl/fsl_micfil.h
+++ b/sound/soc/fsl/fsl_micfil.h
@@ -62,7 +62,7 @@
#define MICFIL_QSEL_VLOW1_QUALITY 5
#define MICFIL_QSEL_VLOW2_QUALITY 4
-#define MICFIL_CTRL2_CICOSR GENMASK(19, 16)
+#define MICFIL_CTRL2_CICOSR GENMASK(20, 16)
#define MICFIL_CTRL2_CLKDIV GENMASK(7, 0)
/* MICFIL Status Register -- REG_MICFIL_STAT 0x08 */
diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c
index 0513e9e8402e..e34e5ea98de5 100644
--- a/sound/soc/fsl/fsl_mqs.c
+++ b/sound/soc/fsl/fsl_mqs.c
@@ -410,12 +410,40 @@ static const struct fsl_mqs_soc_data fsl_mqs_imx95_netc_data = {
.div_shift = 9,
};
+static const struct fsl_mqs_soc_data fsl_mqs_imx943_aon_data = {
+ .type = TYPE_REG_SM,
+ .ctrl_off = 0x88,
+ .en_mask = BIT(1),
+ .en_shift = 1,
+ .rst_mask = BIT(2),
+ .rst_shift = 2,
+ .osr_mask = BIT(3),
+ .osr_shift = 3,
+ .div_mask = GENMASK(15, 8),
+ .div_shift = 8,
+};
+
+static const struct fsl_mqs_soc_data fsl_mqs_imx943_wakeup_data = {
+ .type = TYPE_REG_GPR,
+ .ctrl_off = 0x10,
+ .en_mask = BIT(1),
+ .en_shift = 1,
+ .rst_mask = BIT(2),
+ .rst_shift = 2,
+ .osr_mask = BIT(3),
+ .osr_shift = 3,
+ .div_mask = GENMASK(15, 8),
+ .div_shift = 8,
+};
+
static const struct of_device_id fsl_mqs_dt_ids[] = {
{ .compatible = "fsl,imx8qm-mqs", .data = &fsl_mqs_imx8qm_data },
{ .compatible = "fsl,imx6sx-mqs", .data = &fsl_mqs_imx6sx_data },
{ .compatible = "fsl,imx93-mqs", .data = &fsl_mqs_imx93_data },
{ .compatible = "fsl,imx95-aonmix-mqs", .data = &fsl_mqs_imx95_aon_data },
{ .compatible = "fsl,imx95-netcmix-mqs", .data = &fsl_mqs_imx95_netc_data },
+ { .compatible = "fsl,imx943-aonmix-mqs", .data = &fsl_mqs_imx943_aon_data },
+ { .compatible = "fsl,imx943-wakeupmix-mqs", .data = &fsl_mqs_imx943_wakeup_data },
{}
};
MODULE_DEVICE_TABLE(of, fsl_mqs_dt_ids);
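
The new i.MX943 entries above only describe where the MQS control bits live (system-manager register block vs. GPR) together with their masks and shifts; the driver can then program either variant generically. A hedged sketch of how such a table entry is typically consumed through regmap is shown below; the local struct and helper are illustrative stand-ins, not the driver's actual symbols.

#include <linux/regmap.h>
#include <linux/types.h>

/* Local mirror of the fields used here; the real struct fsl_mqs_soc_data is
 * private to fsl_mqs.c and carries more members. */
struct ex_mqs_soc_data {
	unsigned int ctrl_off;
	unsigned int en_mask, en_shift;
	unsigned int div_mask, div_shift;
};

/* Illustrative helper: program the divider and enable bit that a soc_data
 * entry describes, without caring which register block backs the regmap. */
static int ex_mqs_apply(struct regmap *regmap,
			const struct ex_mqs_soc_data *d,
			unsigned int div, bool enable)
{
	int ret;

	ret = regmap_update_bits(regmap, d->ctrl_off, d->div_mask,
				 div << d->div_shift);
	if (ret)
		return ret;

	return regmap_update_bits(regmap, d->ctrl_off, d->en_mask,
				  enable ? d->en_mask : 0);
}
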
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 634168d2bb6e..c4eb87c5d39e 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -885,7 +885,7 @@ static int fsl_sai_startup(struct snd_pcm_substream *substream,
sai->dma_params_rx.maxburst);
ret = snd_pcm_hw_constraint_list(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_RATE, &fsl_sai_rate_constraints);
+ SNDRV_PCM_HW_PARAM_RATE, &sai->constraint_rates);
return ret;
}
@@ -1442,6 +1442,11 @@ static int fsl_sai_probe(struct platform_device *pdev)
fsl_asoc_get_pll_clocks(&pdev->dev, &sai->pll8k_clk,
&sai->pll11k_clk);
+ fsl_asoc_constrain_rates(&sai->constraint_rates,
+ &fsl_sai_rate_constraints,
+ sai->pll8k_clk, sai->pll11k_clk, NULL,
+ sai->constraint_rates_list);
+
/* Use Multi FIFO mode depending on the support from SDMA script */
ret = of_property_read_u32_array(np, "dmas", dmas, 4);
if (!sai->soc_data->use_edma && !ret && dmas[2] == IMX_DMATYPE_MULTI_SAI)
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index 9c4d19fe22c6..0e25e2fc7ce0 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -9,6 +9,7 @@
#include <linux/dma/imx-dma.h>
#include <sound/dmaengine_pcm.h>
+#define FAL_SAI_NUM_RATES 20
#define FSL_SAI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE |\
@@ -309,6 +310,8 @@ struct fsl_sai {
struct pinctrl *pinctrl;
struct pinctrl_state *pins_state;
struct sdma_peripheral_config audio_config[2];
+ struct snd_pcm_hw_constraint_list constraint_rates;
+ unsigned int constraint_rates_list[FAL_SAI_NUM_RATES];
};
#define TX 1
diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
index a5ab27c2f711..d69a6b9795bf 100644
--- a/sound/soc/fsl/fsl_utils.c
+++ b/sound/soc/fsl/fsl_utils.c
@@ -152,6 +152,51 @@ void fsl_asoc_reparent_pll_clocks(struct device *dev, struct clk *clk,
}
EXPORT_SYMBOL(fsl_asoc_reparent_pll_clocks);
+/**
+ * fsl_asoc_constrain_rates - constrain rates according to clocks
+ *
+ * @target_constr: target constraint
+ * @original_constr: original constraint
+ * @pll8k_clk: PLL clock pointer for 8kHz
+ * @pll11k_clk: PLL clock pointer for 11kHz
+ * @ext_clk: External clock pointer
+ * @target_rates: target rates array
+ *
+ * This function constrains the rate list, keeping only rates that divide one
+ * of the given clock rates evenly.
+ */
+void fsl_asoc_constrain_rates(struct snd_pcm_hw_constraint_list *target_constr,
+ const struct snd_pcm_hw_constraint_list *original_constr,
+ struct clk *pll8k_clk, struct clk *pll11k_clk,
+ struct clk *ext_clk, int *target_rates)
+{
+ int i, j, k = 0;
+ u64 clk_rate[3];
+
+ *target_constr = *original_constr;
+ if (pll8k_clk || pll11k_clk || ext_clk) {
+ target_constr->list = target_rates;
+ target_constr->count = 0;
+ for (i = 0; i < original_constr->count; i++) {
+ clk_rate[0] = clk_get_rate(pll8k_clk);
+ clk_rate[1] = clk_get_rate(pll11k_clk);
+ clk_rate[2] = clk_get_rate(ext_clk);
+ for (j = 0; j < 3; j++) {
+ if (clk_rate[j] != 0 &&
+ do_div(clk_rate[j], original_constr->list[i]) == 0) {
+ target_rates[k++] = original_constr->list[i];
+ target_constr->count++;
+ break;
+ }
+ }
+ }
+
+ /* Fall back to the original constraint if no suitable rate was found */
+ if (!target_constr->count)
+ *target_constr = *original_constr;
+ }
+}
+EXPORT_SYMBOL(fsl_asoc_constrain_rates);
+
MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_DESCRIPTION("Freescale ASoC utility code");
MODULE_LICENSE("GPL v2");
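
fsl_asoc_constrain_rates() keeps only the rates that divide at least one of the supplied clock rates evenly, so the resulting constraint list never advertises a rate the PLLs cannot generate, and it falls back to the original list if nothing survives. The user-space style sketch below shows the same divisibility filter in isolation; the clock values are typical 8 kHz / 11.025 kHz audio PLL frequencies chosen for illustration.

#include <stdint.h>
#include <stdio.h>

/* Keep only the rates that divide one of the clock rates with no remainder. */
static unsigned int filter_rates(const unsigned int *in, unsigned int n,
				 const uint64_t *clks, unsigned int nclk,
				 unsigned int *out)
{
	unsigned int i, j, count = 0;

	for (i = 0; i < n; i++) {
		for (j = 0; j < nclk; j++) {
			if (clks[j] && clks[j] % in[i] == 0) {
				out[count++] = in[i];
				break;
			}
		}
	}
	return count;
}

int main(void)
{
	const unsigned int rates[] = { 8000, 11025, 16000, 22050, 32000,
				       44100, 48000, 88200, 96000 };
	const uint64_t clks[] = { 393216000, 361267200 };
	unsigned int out[9], n, i;

	n = filter_rates(rates, 9, clks, 2, out);
	for (i = 0; i < n; i++)
		printf("%u Hz\n", out[i]);

	return 0;
}
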
diff --git a/sound/soc/fsl/fsl_utils.h b/sound/soc/fsl/fsl_utils.h
index 4d5f3d93bc81..21b25a11ecda 100644
--- a/sound/soc/fsl/fsl_utils.h
+++ b/sound/soc/fsl/fsl_utils.h
@@ -26,4 +26,9 @@ void fsl_asoc_get_pll_clocks(struct device *dev, struct clk **pll8k_clk,
void fsl_asoc_reparent_pll_clocks(struct device *dev, struct clk *clk,
struct clk *pll8k_clk,
struct clk *pll11k_clk, u64 ratio);
+
+void fsl_asoc_constrain_rates(struct snd_pcm_hw_constraint_list *target_constr,
+ const struct snd_pcm_hw_constraint_list *original_constr,
+ struct clk *pll8k_clk, struct clk *pll11k_clk,
+ struct clk *ext_clk, int *target_rates);
#endif /* _FSL_UTILS_H */
diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
index 9c184ab73468..c59c1af5a98a 100644
--- a/sound/soc/fsl/fsl_xcvr.c
+++ b/sound/soc/fsl/fsl_xcvr.c
@@ -19,6 +19,7 @@
#include "imx-pcm.h"
#define FSL_XCVR_CAPDS_SIZE 256
+#define SPDIF_NUM_RATES 7
enum fsl_xcvr_pll_verison {
PLL_MX8MP,
@@ -37,6 +38,8 @@ struct fsl_xcvr {
const struct fsl_xcvr_soc_data *soc_data;
struct platform_device *pdev;
struct regmap *regmap;
+ struct regmap *regmap_phy;
+ struct regmap *regmap_pll;
struct clk *ipg_clk;
struct clk *pll_ipg_clk;
struct clk *phy_clk;
@@ -55,6 +58,8 @@ struct fsl_xcvr {
u8 cap_ds[FSL_XCVR_CAPDS_SIZE];
struct work_struct work_rst;
spinlock_t lock; /* Protect hw_reset and trigger */
+ struct snd_pcm_hw_constraint_list spdif_constr_rates;
+ u32 spdif_constr_rates_list[SPDIF_NUM_RATES];
};
static const struct fsl_xcvr_pll_conf {
@@ -257,7 +262,7 @@ static int fsl_xcvr_ai_write(struct fsl_xcvr *xcvr, u8 reg, u32 data, bool phy)
idx = BIT(phy ? 26 : 24);
tidx = BIT(phy ? 27 : 25);
- regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_CLR, 0xFF);
+ regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_CLR, 0xFF | FSL_XCVR_PHY_AI_CTRL_AI_RWB);
regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_SET, reg);
regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_WDATA, data);
regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_TOG, idx);
@@ -271,6 +276,59 @@ static int fsl_xcvr_ai_write(struct fsl_xcvr *xcvr, u8 reg, u32 data, bool phy)
return ret;
}
+static int fsl_xcvr_ai_read(struct fsl_xcvr *xcvr, u8 reg, u32 *data, bool phy)
+{
+ struct device *dev = &xcvr->pdev->dev;
+ u32 val, idx, tidx;
+ int ret;
+
+ idx = BIT(phy ? 26 : 24);
+ tidx = BIT(phy ? 27 : 25);
+
+ regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_CLR, 0xFF | FSL_XCVR_PHY_AI_CTRL_AI_RWB);
+ regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_SET, reg | FSL_XCVR_PHY_AI_CTRL_AI_RWB);
+ regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_TOG, idx);
+
+ ret = regmap_read_poll_timeout(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL, val,
+ (val & idx) == ((val & tidx) >> 1),
+ 10, 10000);
+ if (ret)
+ dev_err(dev, "AI timeout: failed to read %s reg 0x%02x\n",
+ phy ? "PHY" : "PLL", reg);
+
+ regmap_read(xcvr->regmap, FSL_XCVR_PHY_AI_RDATA, data);
+
+ return ret;
+}
+
+static int fsl_xcvr_phy_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct fsl_xcvr *xcvr = context;
+
+ return fsl_xcvr_ai_read(xcvr, reg, val, 1);
+}
+
+static int fsl_xcvr_phy_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct fsl_xcvr *xcvr = context;
+
+ return fsl_xcvr_ai_write(xcvr, reg, val, 1);
+}
+
+static int fsl_xcvr_pll_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct fsl_xcvr *xcvr = context;
+
+ return fsl_xcvr_ai_read(xcvr, reg, val, 0);
+}
+
+static int fsl_xcvr_pll_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct fsl_xcvr *xcvr = context;
+
+ return fsl_xcvr_ai_write(xcvr, reg, val, 0);
+}
+
static int fsl_xcvr_en_phy_pll(struct fsl_xcvr *xcvr, u32 freq, bool tx)
{
struct device *dev = &xcvr->pdev->dev;
@@ -303,55 +361,55 @@ static int fsl_xcvr_en_phy_pll(struct fsl_xcvr *xcvr, u32 freq, bool tx)
switch (xcvr->soc_data->pll_ver) {
case PLL_MX8MP:
/* PLL: BANDGAP_SET: EN_VBG (enable bandgap) */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_BANDGAP_SET,
- FSL_XCVR_PLL_BANDGAP_EN_VBG, 0);
+ regmap_set_bits(xcvr->regmap_pll, FSL_XCVR_PLL_BANDGAP,
+ FSL_XCVR_PLL_BANDGAP_EN_VBG);
/* PLL: CTRL0: DIV_INTEGER */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0, fsl_xcvr_pll_cfg[i].mfi, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0, fsl_xcvr_pll_cfg[i].mfi);
/* PLL: NUMERATOR: MFN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_NUM, fsl_xcvr_pll_cfg[i].mfn, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_NUM, fsl_xcvr_pll_cfg[i].mfn);
/* PLL: DENOMINATOR: MFD */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_DEN, fsl_xcvr_pll_cfg[i].mfd, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_DEN, fsl_xcvr_pll_cfg[i].mfd);
/* PLL: CTRL0_SET: HOLD_RING_OFF, POWER_UP */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0_SET,
- FSL_XCVR_PLL_CTRL0_HROFF | FSL_XCVR_PLL_CTRL0_PWP, 0);
+ regmap_set_bits(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0,
+ FSL_XCVR_PLL_CTRL0_HROFF | FSL_XCVR_PLL_CTRL0_PWP);
udelay(25);
/* PLL: CTRL0: Clear Hold Ring Off */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0_CLR,
- FSL_XCVR_PLL_CTRL0_HROFF, 0);
+ regmap_clear_bits(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0,
+ FSL_XCVR_PLL_CTRL0_HROFF);
udelay(100);
if (tx) { /* TX is enabled for SPDIF only */
/* PLL: POSTDIV: PDIV0 */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_PDIV,
- FSL_XCVR_PLL_PDIVx(log2, 0), 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_PDIV,
+ FSL_XCVR_PLL_PDIVx(log2, 0));
/* PLL: CTRL_SET: CLKMUX0_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0_SET,
- FSL_XCVR_PLL_CTRL0_CM0_EN, 0);
+ regmap_set_bits(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0,
+ FSL_XCVR_PLL_CTRL0_CM0_EN);
} else if (xcvr->mode == FSL_XCVR_MODE_EARC) { /* eARC RX */
/* PLL: POSTDIV: PDIV1 */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_PDIV,
- FSL_XCVR_PLL_PDIVx(log2, 1), 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_PDIV,
+ FSL_XCVR_PLL_PDIVx(log2, 1));
/* PLL: CTRL_SET: CLKMUX1_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0_SET,
- FSL_XCVR_PLL_CTRL0_CM1_EN, 0);
+ regmap_set_bits(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0,
+ FSL_XCVR_PLL_CTRL0_CM1_EN);
} else { /* SPDIF / ARC RX */
/* PLL: POSTDIV: PDIV2 */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_PDIV,
- FSL_XCVR_PLL_PDIVx(log2, 2), 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_PLL_PDIV,
+ FSL_XCVR_PLL_PDIVx(log2, 2));
/* PLL: CTRL_SET: CLKMUX2_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PLL_CTRL0_SET,
- FSL_XCVR_PLL_CTRL0_CM2_EN, 0);
+ regmap_set_bits(xcvr->regmap_pll, FSL_XCVR_PLL_CTRL0,
+ FSL_XCVR_PLL_CTRL0_CM2_EN);
}
break;
case PLL_MX95:
val = fsl_xcvr_pll_cfg[i].mfi << FSL_XCVR_GP_PLL_DIV_MFI_SHIFT | div;
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_GP_PLL_DIV, val, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_GP_PLL_DIV, val);
val = fsl_xcvr_pll_cfg[i].mfn << FSL_XCVR_GP_PLL_NUMERATOR_MFN_SHIFT;
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_GP_PLL_NUMERATOR, val, 0);
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_GP_PLL_DENOMINATOR,
- fsl_xcvr_pll_cfg[i].mfd, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_GP_PLL_NUMERATOR, val);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_GP_PLL_DENOMINATOR,
+ fsl_xcvr_pll_cfg[i].mfd);
val = FSL_XCVR_GP_PLL_CTRL_POWERUP | FSL_XCVR_GP_PLL_CTRL_CLKMUX_EN;
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_GP_PLL_CTRL, val, 0);
+ regmap_write(xcvr->regmap_pll, FSL_XCVR_GP_PLL_CTRL, val);
break;
default:
dev_err(dev, "Error for PLL version %d\n", xcvr->soc_data->pll_ver);
@@ -360,22 +418,22 @@ static int fsl_xcvr_en_phy_pll(struct fsl_xcvr *xcvr, u32 freq, bool tx)
if (xcvr->mode == FSL_XCVR_MODE_EARC) { /* eARC mode */
/* PHY: CTRL_SET: TX_DIFF_OE, PHY_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL_SET,
- FSL_XCVR_PHY_CTRL_TSDIFF_OE |
- FSL_XCVR_PHY_CTRL_PHY_EN, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL,
+ FSL_XCVR_PHY_CTRL_TSDIFF_OE |
+ FSL_XCVR_PHY_CTRL_PHY_EN);
/* PHY: CTRL2_SET: EARC_TX_MODE */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL2_SET,
- FSL_XCVR_PHY_CTRL2_EARC_TXMS, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL2,
+ FSL_XCVR_PHY_CTRL2_EARC_TXMS);
} else if (!tx) { /* SPDIF / ARC RX mode */
if (xcvr->mode == FSL_XCVR_MODE_SPDIF)
/* PHY: CTRL_SET: SPDIF_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL_SET,
- FSL_XCVR_PHY_CTRL_SPDIF_EN, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL,
+ FSL_XCVR_PHY_CTRL_SPDIF_EN);
else /* PHY: CTRL_SET: ARC RX setup */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL_SET,
- FSL_XCVR_PHY_CTRL_PHY_EN |
- FSL_XCVR_PHY_CTRL_RX_CM_EN |
- fsl_xcvr_phy_arc_cfg[xcvr->arc_mode], 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL,
+ FSL_XCVR_PHY_CTRL_PHY_EN |
+ FSL_XCVR_PHY_CTRL_RX_CM_EN |
+ fsl_xcvr_phy_arc_cfg[xcvr->arc_mode]);
}
dev_dbg(dev, "PLL Fexp: %u, Fout: %u, mfi: %u, mfn: %u, mfd: %d, div: %u, pdiv0: %u\n",
@@ -416,17 +474,17 @@ static int fsl_xcvr_en_aud_pll(struct fsl_xcvr *xcvr, u32 freq)
if (xcvr->mode == FSL_XCVR_MODE_EARC) { /* eARC mode */
/* PHY: CTRL_SET: TX_DIFF_OE, PHY_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL_SET,
- FSL_XCVR_PHY_CTRL_TSDIFF_OE |
- FSL_XCVR_PHY_CTRL_PHY_EN, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL,
+ FSL_XCVR_PHY_CTRL_TSDIFF_OE |
+ FSL_XCVR_PHY_CTRL_PHY_EN);
/* PHY: CTRL2_SET: EARC_TX_MODE */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL2_SET,
- FSL_XCVR_PHY_CTRL2_EARC_TXMS, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL2,
+ FSL_XCVR_PHY_CTRL2_EARC_TXMS);
} else { /* SPDIF mode */
/* PHY: CTRL_SET: TX_CLK_AUD_SS | SPDIF_EN */
- fsl_xcvr_ai_write(xcvr, FSL_XCVR_PHY_CTRL_SET,
- FSL_XCVR_PHY_CTRL_TX_CLK_AUD_SS |
- FSL_XCVR_PHY_CTRL_SPDIF_EN, 1);
+ regmap_set_bits(xcvr->regmap_phy, FSL_XCVR_PHY_CTRL,
+ FSL_XCVR_PHY_CTRL_TX_CLK_AUD_SS |
+ FSL_XCVR_PHY_CTRL_SPDIF_EN);
}
dev_dbg(dev, "PLL Fexp: %u\n", freq);
@@ -448,7 +506,7 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
switch (xcvr->mode) {
case FSL_XCVR_MODE_SPDIF:
if (xcvr->soc_data->spdif_only && tx) {
- ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL_SET,
+ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL,
FSL_XCVR_TX_DPTH_CTRL_BYPASS_FEM,
FSL_XCVR_TX_DPTH_CTRL_BYPASS_FEM);
if (ret < 0) {
@@ -466,8 +524,8 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
return ret;
}
- ret = regmap_write(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL_SET,
- FSL_XCVR_TX_DPTH_CTRL_FRM_FMT);
+ ret = regmap_set_bits(xcvr->regmap, FSL_XCVR_TX_DPTH_CTRL,
+ FSL_XCVR_TX_DPTH_CTRL_FRM_FMT);
if (ret < 0) {
dev_err(dai->dev, "Failed to set TX_DPTH: %d\n", ret);
return ret;
@@ -484,11 +542,11 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
* Clear RX FIFO, flip RX FIFO bits,
* disable eARC related HW mode detects
*/
- ret = regmap_write(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL_SET,
- FSL_XCVR_RX_DPTH_CTRL_STORE_FMT |
- FSL_XCVR_RX_DPTH_CTRL_CLR_RX_FIFO |
- FSL_XCVR_RX_DPTH_CTRL_COMP |
- FSL_XCVR_RX_DPTH_CTRL_LAYB_CTRL);
+ ret = regmap_set_bits(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL,
+ FSL_XCVR_RX_DPTH_CTRL_STORE_FMT |
+ FSL_XCVR_RX_DPTH_CTRL_CLR_RX_FIFO |
+ FSL_XCVR_RX_DPTH_CTRL_COMP |
+ FSL_XCVR_RX_DPTH_CTRL_LAYB_CTRL);
if (ret < 0) {
dev_err(dai->dev, "Failed to set RX_DPTH: %d\n", ret);
return ret;
@@ -505,18 +563,18 @@ static int fsl_xcvr_prepare(struct snd_pcm_substream *substream,
case FSL_XCVR_MODE_EARC:
if (!tx) {
/** Clear RX FIFO, flip RX FIFO bits */
- ret = regmap_write(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL_SET,
- FSL_XCVR_RX_DPTH_CTRL_STORE_FMT |
- FSL_XCVR_RX_DPTH_CTRL_CLR_RX_FIFO);
+ ret = regmap_set_bits(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL,
+ FSL_XCVR_RX_DPTH_CTRL_STORE_FMT |
+ FSL_XCVR_RX_DPTH_CTRL_CLR_RX_FIFO);
if (ret < 0) {
dev_err(dai->dev, "Failed to set RX_DPTH: %d\n", ret);
return ret;
}
/** Enable eARC related HW mode detects */
- ret = regmap_write(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL_CLR,
- FSL_XCVR_RX_DPTH_CTRL_COMP |
- FSL_XCVR_RX_DPTH_CTRL_LAYB_CTRL);
+ ret = regmap_clear_bits(xcvr->regmap, FSL_XCVR_RX_DPTH_CTRL,
+ FSL_XCVR_RX_DPTH_CTRL_COMP |
+ FSL_XCVR_RX_DPTH_CTRL_LAYB_CTRL);
if (ret < 0) {
dev_err(dai->dev, "Failed to clr TX_DPTH: %d\n", ret);
return ret;
@@ -585,8 +643,12 @@ static int fsl_xcvr_startup(struct snd_pcm_substream *substream,
switch (xcvr->mode) {
case FSL_XCVR_MODE_SPDIF:
case FSL_XCVR_MODE_ARC:
- ret = fsl_xcvr_constr(substream, &fsl_xcvr_spdif_channels_constr,
- &fsl_xcvr_spdif_rates_constr);
+ if (xcvr->soc_data->spdif_only && tx)
+ ret = fsl_xcvr_constr(substream, &fsl_xcvr_spdif_channels_constr,
+ &xcvr->spdif_constr_rates);
+ else
+ ret = fsl_xcvr_constr(substream, &fsl_xcvr_spdif_channels_constr,
+ &fsl_xcvr_spdif_rates_constr);
break;
case FSL_XCVR_MODE_EARC:
ret = fsl_xcvr_constr(substream, &fsl_xcvr_earc_channels_constr,
@@ -696,9 +758,9 @@ static int fsl_xcvr_trigger(struct snd_pcm_substream *substream, int cmd,
}
fallthrough;
case FSL_XCVR_MODE_SPDIF:
- ret = regmap_write(xcvr->regmap,
- FSL_XCVR_TX_DPTH_CTRL_SET,
- FSL_XCVR_TX_DPTH_CTRL_STRT_DATA_TX);
+ ret = regmap_set_bits(xcvr->regmap,
+ FSL_XCVR_TX_DPTH_CTRL,
+ FSL_XCVR_TX_DPTH_CTRL_STRT_DATA_TX);
if (ret < 0) {
dev_err(dai->dev, "Failed to start DATA_TX: %d\n", ret);
goto release_lock;
@@ -754,9 +816,9 @@ static int fsl_xcvr_trigger(struct snd_pcm_substream *substream, int cmd,
if (tx) {
switch (xcvr->mode) {
case FSL_XCVR_MODE_SPDIF:
- ret = regmap_write(xcvr->regmap,
- FSL_XCVR_TX_DPTH_CTRL_CLR,
- FSL_XCVR_TX_DPTH_CTRL_STRT_DATA_TX);
+ ret = regmap_clear_bits(xcvr->regmap,
+ FSL_XCVR_TX_DPTH_CTRL,
+ FSL_XCVR_TX_DPTH_CTRL_STRT_DATA_TX);
if (ret < 0) {
dev_err(dai->dev, "Failed to stop DATA_TX: %d\n", ret);
goto release_lock;
@@ -1169,6 +1231,7 @@ static bool fsl_xcvr_writeable_reg(struct device *dev, unsigned int reg)
case FSL_XCVR_RX_DPTH_CNTR_CTRL_SET:
case FSL_XCVR_RX_DPTH_CNTR_CTRL_CLR:
case FSL_XCVR_RX_DPTH_CNTR_CTRL_TOG:
+ case FSL_XCVR_TX_DPTH_CTRL:
case FSL_XCVR_TX_DPTH_CTRL_SET:
case FSL_XCVR_TX_DPTH_CTRL_CLR:
case FSL_XCVR_TX_DPTH_CTRL_TOG:
@@ -1190,7 +1253,49 @@ static bool fsl_xcvr_writeable_reg(struct device *dev, unsigned int reg)
static bool fsl_xcvr_volatile_reg(struct device *dev, unsigned int reg)
{
- return fsl_xcvr_readable_reg(dev, reg);
+ switch (reg) {
+ case FSL_XCVR_EXT_STATUS:
+ case FSL_XCVR_EXT_ISR:
+ case FSL_XCVR_EXT_ISR_SET:
+ case FSL_XCVR_EXT_ISR_CLR:
+ case FSL_XCVR_EXT_ISR_TOG:
+ case FSL_XCVR_ISR:
+ case FSL_XCVR_ISR_SET:
+ case FSL_XCVR_ISR_CLR:
+ case FSL_XCVR_ISR_TOG:
+ case FSL_XCVR_PHY_AI_CTRL:
+ case FSL_XCVR_PHY_AI_CTRL_SET:
+ case FSL_XCVR_PHY_AI_CTRL_CLR:
+ case FSL_XCVR_PHY_AI_CTRL_TOG:
+ case FSL_XCVR_PHY_AI_RDATA:
+ case FSL_XCVR_RX_CS_DATA_0:
+ case FSL_XCVR_RX_CS_DATA_1:
+ case FSL_XCVR_RX_CS_DATA_2:
+ case FSL_XCVR_RX_CS_DATA_3:
+ case FSL_XCVR_RX_CS_DATA_4:
+ case FSL_XCVR_RX_CS_DATA_5:
+ case FSL_XCVR_RX_DPTH_CNTR_CTRL:
+ case FSL_XCVR_RX_DPTH_CNTR_CTRL_SET:
+ case FSL_XCVR_RX_DPTH_CNTR_CTRL_CLR:
+ case FSL_XCVR_RX_DPTH_CNTR_CTRL_TOG:
+ case FSL_XCVR_RX_DPTH_TSCR:
+ case FSL_XCVR_RX_DPTH_BCR:
+ case FSL_XCVR_RX_DPTH_BCTR:
+ case FSL_XCVR_RX_DPTH_BCRR:
+ case FSL_XCVR_TX_DPTH_CNTR_CTRL:
+ case FSL_XCVR_TX_DPTH_CNTR_CTRL_SET:
+ case FSL_XCVR_TX_DPTH_CNTR_CTRL_CLR:
+ case FSL_XCVR_TX_DPTH_CNTR_CTRL_TOG:
+ case FSL_XCVR_TX_DPTH_TSCR:
+ case FSL_XCVR_TX_DPTH_BCR:
+ case FSL_XCVR_TX_DPTH_BCTR:
+ case FSL_XCVR_TX_DPTH_BCRR:
+ case FSL_XCVR_DEBUG_REG_0:
+ case FSL_XCVR_DEBUG_REG_1:
+ return true;
+ default:
+ return false;
+ }
}
static const struct regmap_config fsl_xcvr_regmap_cfg = {
@@ -1206,6 +1311,49 @@ static const struct regmap_config fsl_xcvr_regmap_cfg = {
.cache_type = REGCACHE_FLAT,
};
+static const struct reg_default fsl_xcvr_phy_reg_defaults[] = {
+ { FSL_XCVR_PHY_CTRL, 0x58200804 },
+ { FSL_XCVR_PHY_STATUS, 0x00000000 },
+ { FSL_XCVR_PHY_ANALOG_TRIM, 0x00260F13 },
+ { FSL_XCVR_PHY_SLEW_RATE_TRIM, 0x00000411 },
+ { FSL_XCVR_PHY_DATA_TEST_DELAY, 0x00990000 },
+ { FSL_XCVR_PHY_TEST_CTRL, 0x00000000 },
+ { FSL_XCVR_PHY_DIFF_CDR_CTRL, 0x016D0009 },
+ { FSL_XCVR_PHY_CTRL2, 0x80000000 },
+};
+
+static const struct regmap_config fsl_xcvr_regmap_phy_cfg = {
+ .reg_bits = 8,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = FSL_XCVR_PHY_CTRL2_TOG,
+ .reg_defaults = fsl_xcvr_phy_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(fsl_xcvr_phy_reg_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .reg_read = fsl_xcvr_phy_reg_read,
+ .reg_write = fsl_xcvr_phy_reg_write,
+};
+
+static const struct regmap_config fsl_xcvr_regmap_pllv0_cfg = {
+ .reg_bits = 8,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = FSL_XCVR_PLL_STAT0_TOG,
+ .cache_type = REGCACHE_FLAT,
+ .reg_read = fsl_xcvr_pll_reg_read,
+ .reg_write = fsl_xcvr_pll_reg_write,
+};
+
+static const struct regmap_config fsl_xcvr_regmap_pllv1_cfg = {
+ .reg_bits = 8,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = FSL_XCVR_GP_PLL_STATUS_TOG,
+ .cache_type = REGCACHE_FLAT,
+ .reg_read = fsl_xcvr_pll_reg_read,
+ .reg_write = fsl_xcvr_pll_reg_write,
+};
+
static void reset_rx_work(struct work_struct *work)
{
struct fsl_xcvr *xcvr = container_of(work, struct fsl_xcvr, work_rst);
@@ -1405,6 +1553,15 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
fsl_asoc_get_pll_clocks(dev, &xcvr->pll8k_clk,
&xcvr->pll11k_clk);
+ if (xcvr->soc_data->spdif_only) {
+ if (!(xcvr->pll8k_clk || xcvr->pll11k_clk))
+ xcvr->pll8k_clk = xcvr->phy_clk;
+ fsl_asoc_constrain_rates(&xcvr->spdif_constr_rates,
+ &fsl_xcvr_spdif_rates_constr,
+ xcvr->pll8k_clk, xcvr->pll11k_clk, NULL,
+ xcvr->spdif_constr_rates_list);
+ }
+
xcvr->ram_addr = devm_platform_ioremap_resource_byname(pdev, "ram");
if (IS_ERR(xcvr->ram_addr))
return PTR_ERR(xcvr->ram_addr);
@@ -1421,6 +1578,40 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
return PTR_ERR(xcvr->regmap);
}
+ if (xcvr->soc_data->use_phy) {
+ xcvr->regmap_phy = devm_regmap_init(dev, NULL, xcvr,
+ &fsl_xcvr_regmap_phy_cfg);
+ if (IS_ERR(xcvr->regmap_phy)) {
+ dev_err(dev, "failed to init XCVR PHY regmap: %ld\n",
+ PTR_ERR(xcvr->regmap_phy));
+ return PTR_ERR(xcvr->regmap_phy);
+ }
+
+ switch (xcvr->soc_data->pll_ver) {
+ case PLL_MX8MP:
+ xcvr->regmap_pll = devm_regmap_init(dev, NULL, xcvr,
+ &fsl_xcvr_regmap_pllv0_cfg);
+ if (IS_ERR(xcvr->regmap_pll)) {
+ dev_err(dev, "failed to init XCVR PLL regmap: %ld\n",
+ PTR_ERR(xcvr->regmap_pll));
+ return PTR_ERR(xcvr->regmap_pll);
+ }
+ break;
+ case PLL_MX95:
+ xcvr->regmap_pll = devm_regmap_init(dev, NULL, xcvr,
+ &fsl_xcvr_regmap_pllv1_cfg);
+ if (IS_ERR(xcvr->regmap_pll)) {
+ dev_err(dev, "failed to init XCVR PLL regmap: %ld\n",
+ PTR_ERR(xcvr->regmap_pll));
+ return PTR_ERR(xcvr->regmap_pll);
+ }
+ break;
+ default:
+ dev_err(dev, "Error for PLL version %d\n", xcvr->soc_data->pll_ver);
+ return -EINVAL;
+ }
+ }
+
xcvr->reset = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(xcvr->reset)) {
dev_err(dev, "failed to get XCVR reset control\n");
@@ -1454,6 +1645,10 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, xcvr);
pm_runtime_enable(dev);
regcache_cache_only(xcvr->regmap, true);
+ if (xcvr->soc_data->use_phy) {
+ regcache_cache_only(xcvr->regmap_phy, true);
+ regcache_cache_only(xcvr->regmap_pll, true);
+ }
/*
* Register platform component before registering cpu dai for there
@@ -1492,7 +1687,8 @@ static int fsl_xcvr_runtime_suspend(struct device *dev)
struct fsl_xcvr *xcvr = dev_get_drvdata(dev);
int ret;
- if (!xcvr->soc_data->spdif_only) {
+ if (!xcvr->soc_data->spdif_only &&
+ xcvr->mode == FSL_XCVR_MODE_EARC) {
/* Assert M0+ reset */
ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
FSL_XCVR_EXT_CTRL_CORE_RESET,
@@ -1502,6 +1698,10 @@ static int fsl_xcvr_runtime_suspend(struct device *dev)
}
regcache_cache_only(xcvr->regmap, true);
+ if (xcvr->soc_data->use_phy) {
+ regcache_cache_only(xcvr->regmap_phy, true);
+ regcache_cache_only(xcvr->regmap_pll, true);
+ }
clk_disable_unprepare(xcvr->spba_clk);
clk_disable_unprepare(xcvr->phy_clk);
@@ -1546,6 +1746,12 @@ static int fsl_xcvr_runtime_resume(struct device *dev)
goto stop_phy_clk;
}
+ ret = reset_control_deassert(xcvr->reset);
+ if (ret) {
+ dev_err(dev, "failed to deassert M0+ reset.\n");
+ goto stop_spba_clk;
+ }
+
regcache_cache_only(xcvr->regmap, false);
regcache_mark_dirty(xcvr->regmap);
ret = regcache_sync(xcvr->regmap);
@@ -1555,31 +1761,49 @@ static int fsl_xcvr_runtime_resume(struct device *dev)
goto stop_spba_clk;
}
- if (xcvr->soc_data->spdif_only)
- return 0;
+ if (xcvr->soc_data->use_phy) {
+ ret = regmap_write(xcvr->regmap, FSL_XCVR_PHY_AI_CTRL_SET,
+ FSL_XCVR_PHY_AI_CTRL_AI_RESETN);
+ if (ret < 0) {
+ dev_err(dev, "Error while releasing PHY reset: %d\n", ret);
+ goto stop_spba_clk;
+ }
- ret = reset_control_deassert(xcvr->reset);
- if (ret) {
- dev_err(dev, "failed to deassert M0+ reset.\n");
- goto stop_spba_clk;
- }
+ regcache_cache_only(xcvr->regmap_phy, false);
+ regcache_mark_dirty(xcvr->regmap_phy);
+ ret = regcache_sync(xcvr->regmap_phy);
+ if (ret) {
+ dev_err(dev, "failed to sync phy regcache.\n");
+ goto stop_spba_clk;
+ }
- ret = fsl_xcvr_load_firmware(xcvr);
- if (ret) {
- dev_err(dev, "failed to load firmware.\n");
- goto stop_spba_clk;
+ regcache_cache_only(xcvr->regmap_pll, false);
+ regcache_mark_dirty(xcvr->regmap_pll);
+ ret = regcache_sync(xcvr->regmap_pll);
+ if (ret) {
+ dev_err(dev, "failed to sync pll regcache.\n");
+ goto stop_spba_clk;
+ }
}
- /* Release M0+ reset */
- ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
- FSL_XCVR_EXT_CTRL_CORE_RESET, 0);
- if (ret < 0) {
- dev_err(dev, "M0+ core release failed: %d\n", ret);
- goto stop_spba_clk;
- }
+ if (xcvr->mode == FSL_XCVR_MODE_EARC) {
+ ret = fsl_xcvr_load_firmware(xcvr);
+ if (ret) {
+ dev_err(dev, "failed to load firmware.\n");
+ goto stop_spba_clk;
+ }
+
+ /* Release M0+ reset */
+ ret = regmap_update_bits(xcvr->regmap, FSL_XCVR_EXT_CTRL,
+ FSL_XCVR_EXT_CTRL_CORE_RESET, 0);
+ if (ret < 0) {
+ dev_err(dev, "M0+ core release failed: %d\n", ret);
+ goto stop_spba_clk;
+ }
- /* Let M0+ core complete firmware initialization */
- msleep(50);
+ /* Let M0+ core complete firmware initialization */
+ msleep(50);
+ }
return 0;
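
The XCVR PHY and PLL registers are not memory mapped; they sit behind the transceiver's AI (access interface) bridge, which is why the patch adds fsl_xcvr_ai_read() and wraps both blocks in regmaps whose reg_read/reg_write callbacks tunnel through that bridge. That in turn is what allows the rest of the driver to replace open-coded *_SET/*_CLR writes with regmap_set_bits()/regmap_clear_bits() and to regcache-sync the blocks on runtime resume. A minimal sketch of registering such an indirect regmap is shown below; everything prefixed ex_ is illustrative rather than the driver's actual code.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

struct ex_ctx {
	void *bridge;	/* whatever the callbacks need to reach the block */
};

static int ex_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	/* perform the indirect (bridged) read here */
	*val = 0;
	return 0;
}

static int ex_reg_write(void *context, unsigned int reg, unsigned int val)
{
	/* perform the indirect (bridged) write here */
	return 0;
}

static const struct regmap_config ex_cfg = {
	.reg_bits	= 8,
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0x7c,
	.cache_type	= REGCACHE_FLAT,
	.reg_read	= ex_reg_read,
	.reg_write	= ex_reg_write,
};

/* With a NULL bus and reg_read/reg_write set, regmap routes every access
 * through the callbacks, so regmap_set_bits(), regcache_mark_dirty() and
 * regcache_sync() work even though the registers are only reachable
 * indirectly. */
static int ex_init(struct device *dev, struct ex_ctx *ctx, struct regmap **map)
{
	*map = devm_regmap_init(dev, NULL, ctx, &ex_cfg);
	return PTR_ERR_OR_ZERO(*map);
}
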
diff --git a/sound/soc/fsl/fsl_xcvr.h b/sound/soc/fsl/fsl_xcvr.h
index c72cb05184df..dade3945cc0c 100644
--- a/sound/soc/fsl/fsl_xcvr.h
+++ b/sound/soc/fsl/fsl_xcvr.h
@@ -234,6 +234,7 @@
#define FSL_XCVR_TX_DPTH_CTRL_TM_NO_PRE_BME GENMASK(31, 30)
#define FSL_XCVR_PHY_AI_CTRL_AI_RESETN BIT(15)
+#define FSL_XCVR_PHY_AI_CTRL_AI_RWB BIT(31)
#define FSL_XCVR_PLL_CTRL0 0x00
#define FSL_XCVR_PLL_CTRL0_SET 0x04
@@ -241,13 +242,25 @@
#define FSL_XCVR_PLL_NUM 0x20
#define FSL_XCVR_PLL_DEN 0x30
#define FSL_XCVR_PLL_PDIV 0x40
+#define FSL_XCVR_PLL_BANDGAP 0x50
#define FSL_XCVR_PLL_BANDGAP_SET 0x54
+#define FSL_XCVR_PLL_STAT0 0x60
+#define FSL_XCVR_PLL_STAT0_TOG 0x6c
+
#define FSL_XCVR_PHY_CTRL 0x00
#define FSL_XCVR_PHY_CTRL_SET 0x04
#define FSL_XCVR_PHY_CTRL_CLR 0x08
+#define FSL_XCVR_PHY_CTRL_TOG 0x0c
+#define FSL_XCVR_PHY_STATUS 0x10
+#define FSL_XCVR_PHY_ANALOG_TRIM 0x20
+#define FSL_XCVR_PHY_SLEW_RATE_TRIM 0x30
+#define FSL_XCVR_PHY_DATA_TEST_DELAY 0x40
+#define FSL_XCVR_PHY_TEST_CTRL 0x50
+#define FSL_XCVR_PHY_DIFF_CDR_CTRL 0x60
#define FSL_XCVR_PHY_CTRL2 0x70
#define FSL_XCVR_PHY_CTRL2_SET 0x74
#define FSL_XCVR_PHY_CTRL2_CLR 0x78
+#define FSL_XCVR_PHY_CTRL2_TOG 0x7c
#define FSL_XCVR_PLL_BANDGAP_EN_VBG BIT(0)
#define FSL_XCVR_PLL_CTRL0_HROFF BIT(13)
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 43e14f2eca8d..cc2918ee2cf5 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -237,7 +237,7 @@ static int imx_audmux_parse_dt_defaults(struct platform_device *pdev,
child);
continue;
}
- if (!of_property_read_bool(child, "fsl,port-config")) {
+ if (!of_property_present(child, "fsl,port-config")) {
dev_warn(&pdev->dev, "child node \"%pOF\" does not have property fsl,port-config\n",
child);
continue;
diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
index 95a57fda0250..ac043ad367ac 100644
--- a/sound/soc/fsl/imx-card.c
+++ b/sound/soc/fsl/imx-card.c
@@ -529,7 +529,7 @@ static int imx_card_parse_of(struct imx_card_data *data)
}
/* DAPM routes */
- if (of_property_read_bool(dev->of_node, "audio-routing")) {
+ if (of_property_present(dev->of_node, "audio-routing")) {
ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
if (ret)
return ret;
diff --git a/sound/soc/fsl/imx-rpmsg.c b/sound/soc/fsl/imx-rpmsg.c
index ce98d2288193..7cd3aa4c8706 100644
--- a/sound/soc/fsl/imx-rpmsg.c
+++ b/sound/soc/fsl/imx-rpmsg.c
@@ -218,7 +218,7 @@ static int imx_rpmsg_probe(struct platform_device *pdev)
if (ret)
goto fail;
- if (of_property_read_bool(np, "audio-routing")) {
+ if (of_property_present(np, "audio-routing")) {
ret = snd_soc_of_parse_audio_routing(&data->card, "audio-routing");
if (ret) {
dev_err(&pdev->dev, "failed to parse audio-routing: %d\n", ret);
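
The of_property_present() conversions above are not just cosmetic: of_property_read_bool() is meant for flag properties that are either present or absent, while "fsl,port-config" and "audio-routing" carry values, so of_property_present() is the accurate way to ask whether the property exists at all. A small sketch of the distinction, with example property names:

#include <linux/of.h>

/* "audio-routing" holds string pairs, so only its presence is tested here. */
static bool ex_has_routing(struct device_node *np)
{
	return of_property_present(np, "audio-routing");
}

/* A true boolean/flag property (no value) is still read with the _bool helper;
 * "example-flag" is a made-up name. */
static bool ex_wants_flag(struct device_node *np)
{
	return of_property_read_bool(np, "example-flag");
}
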
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 7655425a3deb..7c422535b01a 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -81,18 +81,14 @@ static void graph_parse_convert(struct device *dev,
struct simple_util_data *adata)
{
struct device_node *top = dev->of_node;
- struct device_node *port = ep_to_port(ep);
- struct device_node *ports = port_to_ports(port);
- struct device_node *node = of_graph_get_port_parent(ep);
+ struct device_node *port __free(device_node) = ep_to_port(ep);
+ struct device_node *ports __free(device_node) = port_to_ports(port);
+ struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
simple_util_parse_convert(top, NULL, adata);
simple_util_parse_convert(ports, NULL, adata);
simple_util_parse_convert(port, NULL, adata);
simple_util_parse_convert(ep, NULL, adata);
-
- of_node_put(port);
- of_node_put(ports);
- of_node_put(node);
}
static int graph_parse_node(struct simple_util_priv *priv,
@@ -140,10 +136,10 @@ static int graph_link_init(struct simple_util_priv *priv,
struct device_node *top = dev->of_node;
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
- struct device_node *port_cpu = ep_to_port(ep_cpu);
- struct device_node *port_codec = ep_to_port(ep_codec);
- struct device_node *ports_cpu = port_to_ports(port_cpu);
- struct device_node *ports_codec = port_to_ports(port_codec);
+ struct device_node *port_cpu __free(device_node) = ep_to_port(ep_cpu);
+ struct device_node *port_codec __free(device_node) = ep_to_port(ep_codec);
+ struct device_node *ports_cpu __free(device_node) = port_to_ports(port_cpu);
+ struct device_node *ports_codec __free(device_node) = port_to_ports(port_codec);
enum snd_soc_trigger_order trigger_start = SND_SOC_TRIGGER_ORDER_DEFAULT;
enum snd_soc_trigger_order trigger_stop = SND_SOC_TRIGGER_ORDER_DEFAULT;
bool playback_only = 0, capture_only = 0;
@@ -152,7 +148,7 @@ static int graph_link_init(struct simple_util_priv *priv,
ret = simple_util_parse_daifmt(dev, ep_cpu, ep_codec,
NULL, &dai_link->dai_fmt);
if (ret < 0)
- goto init_end;
+ return ret;
graph_util_parse_link_direction(top, &playback_only, &capture_only);
graph_util_parse_link_direction(port_cpu, &playback_only, &capture_only);
@@ -187,14 +183,7 @@ static int graph_link_init(struct simple_util_priv *priv,
if (priv->ops)
dai_link->ops = priv->ops;
- ret = simple_util_set_dailink_name(dev, dai_link, name);
-init_end:
- of_node_put(ports_cpu);
- of_node_put(ports_codec);
- of_node_put(port_cpu);
- of_node_put(port_codec);
-
- return ret;
+ return simple_util_set_dailink_name(dev, dai_link, name);
}
static int graph_dai_link_of_dpcm(struct simple_util_priv *priv,
@@ -250,8 +239,6 @@ static int graph_dai_link_of_dpcm(struct simple_util_priv *priv,
} else {
struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, 0);
struct snd_soc_dai_link_component *codecs = snd_soc_link_to_codec(dai_link, 0);
- struct device_node *port;
- struct device_node *ports;
/* CPU is dummy */
@@ -267,14 +254,12 @@ static int graph_dai_link_of_dpcm(struct simple_util_priv *priv,
"be.%pOFP.%s", codecs->of_node, codecs->dai_name);
/* check "prefix" from top node */
- port = ep_to_port(ep);
- ports = port_to_ports(port);
+ struct device_node *port __free(device_node) = ep_to_port(ep);
+ struct device_node *ports __free(device_node) = port_to_ports(port);
+
snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node, "prefix");
snd_soc_of_parse_node_prefix(ports, cconf, codecs->of_node, "prefix");
snd_soc_of_parse_node_prefix(port, cconf, codecs->of_node, "prefix");
-
- of_node_put(ports);
- of_node_put(port);
}
graph_parse_convert(dev, ep, &dai_props->adata);
@@ -361,8 +346,6 @@ static int __graph_for_each_link(struct simple_util_priv *priv,
struct device *dev = simple_priv_to_dev(priv);
struct device_node *node = dev->of_node;
struct device_node *cpu_port;
- struct device_node *codec_ep;
- struct device_node *codec_port;
struct device_node *codec_port_old = NULL;
struct simple_util_data adata;
int rc, ret = 0;
@@ -374,8 +357,8 @@ static int __graph_for_each_link(struct simple_util_priv *priv,
/* loop for all CPU endpoint */
for_each_of_graph_port_endpoint(cpu_port, cpu_ep) {
/* get codec */
- codec_ep = of_graph_get_remote_endpoint(cpu_ep);
- codec_port = ep_to_port(codec_ep);
+ struct device_node *codec_ep __free(device_node) = of_graph_get_remote_endpoint(cpu_ep);
+ struct device_node *codec_port __free(device_node) = ep_to_port(codec_ep);
/* get convert-xxx property */
memset(&adata, 0, sizeof(adata));
@@ -399,9 +382,6 @@ static int __graph_for_each_link(struct simple_util_priv *priv,
ret = func_noml(priv, cpu_ep, codec_ep, li);
}
- of_node_put(codec_ep);
- of_node_put(codec_port);
-
if (ret < 0)
return ret;
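
Most of the diffstat in this file and the ones that follow comes from replacing manual of_node_put() bookkeeping with __free(device_node), the scope-based cleanup annotation from <linux/cleanup.h>: the reference is dropped automatically when the variable leaves scope, so early returns can no longer leak device_node references and the error-unwind labels disappear. A minimal sketch of the pattern:

#include <linux/cleanup.h>
#include <linux/of.h>

/* The reference taken by of_get_parent() is released by of_node_put() when
 * 'parent' goes out of scope -- on every return path -- thanks to the
 * __free(device_node) annotation. */
static int ex_check_parent(struct device_node *np)
{
	struct device_node *parent __free(device_node) = of_get_parent(np);

	if (!parent)
		return -ENODEV;

	if (!of_node_name_eq(parent, "ports"))
		return -EINVAL;

	return 0;
}
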
diff --git a/sound/soc/generic/audio-graph-card2.c b/sound/soc/generic/audio-graph-card2.c
index 1f5c4e8ff1b9..c36b1a2ac949 100644
--- a/sound/soc/generic/audio-graph-card2.c
+++ b/sound/soc/generic/audio-graph-card2.c
@@ -331,10 +331,9 @@ static int graph_lnk_is_multi(struct device_node *lnk)
return __graph_get_type(lnk) == GRAPH_MULTI;
}
-static struct device_node *graph_get_next_multi_ep(struct device_node **port)
+static struct device_node *graph_get_next_multi_ep(struct device_node **port, int idx)
{
- struct device_node *ports = port_to_ports(*port);
- struct device_node *ep = NULL;
+ struct device_node *ports __free(device_node) = port_to_ports(*port);
struct device_node *rep = NULL;
/*
@@ -352,15 +351,22 @@ static struct device_node *graph_get_next_multi_ep(struct device_node **port)
* port@1 { rep1 };
* };
*/
- *port = of_graph_get_next_port(ports, *port);
+
+ /*
+ * Don't use of_graph_get_next_port() here
+ *
+ * In the overlay case, "port" nodes are not necessarily in order, so we need
+ * to use of_graph_get_port_by_id() instead.
+ */
+ of_node_put(*port);
+
+ *port = of_graph_get_port_by_id(ports, idx);
if (*port) {
- ep = of_graph_get_next_port_endpoint(*port, NULL);
+ struct device_node *ep __free(device_node) = of_graph_get_next_port_endpoint(*port, NULL);
+
rep = of_graph_get_remote_endpoint(ep);
}
- of_node_put(ep);
- of_node_put(ports);
-
return rep;
}
@@ -373,16 +379,13 @@ static const struct snd_soc_ops graph_ops = {
static void graph_parse_convert(struct device_node *ep,
struct simple_dai_props *props)
{
- struct device_node *port = ep_to_port(ep);
- struct device_node *ports = port_to_ports(port);
+ struct device_node *port __free(device_node) = ep_to_port(ep);
+ struct device_node *ports __free(device_node) = port_to_ports(port);
struct simple_util_data *adata = &props->adata;
simple_util_parse_convert(ports, NULL, adata);
simple_util_parse_convert(port, NULL, adata);
simple_util_parse_convert(ep, NULL, adata);
-
- of_node_put(port);
- of_node_put(ports);
}
static int __graph_parse_node(struct simple_util_priv *priv,
@@ -471,14 +474,11 @@ static int __graph_parse_node(struct simple_util_priv *priv,
if (!is_cpu && gtype == GRAPH_DPCM) {
struct snd_soc_dai_link_component *codecs = snd_soc_link_to_codec(dai_link, idx);
struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, idx);
- struct device_node *rport = ep_to_port(ep);
- struct device_node *rports = port_to_ports(rport);
+ struct device_node *rport __free(device_node) = ep_to_port(ep);
+ struct device_node *rports __free(device_node) = port_to_ports(rport);
snd_soc_of_parse_node_prefix(rports, cconf, codecs->of_node, "prefix");
snd_soc_of_parse_node_prefix(rport, cconf, codecs->of_node, "prefix");
-
- of_node_put(rport);
- of_node_put(rports);
}
if (is_cpu) {
@@ -526,25 +526,21 @@ static int graph_parse_node_multi_nm(struct snd_soc_dai_link *dai_link,
* };
* };
*/
- struct device_node *mcpu_ep = of_graph_get_next_port_endpoint(mcpu_port, NULL);
- struct device_node *mcpu_ports = port_to_ports(mcpu_port);
- struct device_node *mcpu_port_top = of_graph_get_next_port(mcpu_ports, NULL);
- struct device_node *mcpu_ep_top = of_graph_get_next_port_endpoint(mcpu_port_top, NULL);
- struct device_node *mcodec_ep_top = of_graph_get_remote_endpoint(mcpu_ep_top);
- struct device_node *mcodec_port_top = ep_to_port(mcodec_ep_top);
- struct device_node *mcodec_ports = port_to_ports(mcodec_port_top);
+ struct device_node *mcpu_ep __free(device_node) = of_graph_get_next_port_endpoint(mcpu_port, NULL);
+ struct device_node *mcpu_ports __free(device_node) = port_to_ports(mcpu_port);
+ struct device_node *mcpu_port_top __free(device_node) = of_graph_get_next_port(mcpu_ports, NULL);
+ struct device_node *mcpu_ep_top __free(device_node) = of_graph_get_next_port_endpoint(mcpu_port_top, NULL);
+ struct device_node *mcodec_ep_top __free(device_node) = of_graph_get_remote_endpoint(mcpu_ep_top);
+ struct device_node *mcodec_port_top __free(device_node) = ep_to_port(mcodec_ep_top);
+ struct device_node *mcodec_ports __free(device_node) = port_to_ports(mcodec_port_top);
int nm_max = max(dai_link->num_cpus, dai_link->num_codecs);
int ret = 0;
- if (cpu_idx > dai_link->num_cpus) {
- ret = -EINVAL;
- goto mcpu_err;
- }
+ if (cpu_idx > dai_link->num_cpus)
+ return -EINVAL;
for_each_of_graph_port_endpoint(mcpu_port, mcpu_ep_n) {
- struct device_node *mcodec_ep_n;
- struct device_node *mcodec_port;
- int codec_idx;
+ int codec_idx = 0;
/* ignore 1st ep which is for element */
if (mcpu_ep_n == mcpu_ep)
@@ -553,16 +549,13 @@ static int graph_parse_node_multi_nm(struct snd_soc_dai_link *dai_link,
if (*nm_idx > nm_max)
break;
- mcodec_ep_n = of_graph_get_remote_endpoint(mcpu_ep_n);
- mcodec_port = ep_to_port(mcodec_ep_n);
-
- if (mcodec_ports != port_to_ports(mcodec_port)) {
- ret = -EINVAL;
- goto mcpu_err;
- }
+ struct device_node *mcodec_ep_n __free(device_node) = of_graph_get_remote_endpoint(mcpu_ep_n);
+ struct device_node *mcodec_port __free(device_node) = ep_to_port(mcodec_ep_n);
- codec_idx = 0;
ret = -EINVAL;
+ if (mcodec_ports != port_to_ports(mcodec_port))
+ break;
+
for_each_of_graph_port(mcodec_ports, mcodec_port_i) {
/* ignore 1st port which is for pair connection */
@@ -582,18 +575,9 @@ static int graph_parse_node_multi_nm(struct snd_soc_dai_link *dai_link,
}
codec_idx++;
}
- of_node_put(mcodec_port);
- of_node_put(mcodec_ep_n);
if (ret < 0)
break;
}
-mcpu_err:
- of_node_put(mcpu_ep);
- of_node_put(mcpu_port_top);
- of_node_put(mcpu_ep_top);
- of_node_put(mcodec_ep_top);
- of_node_put(mcodec_port_top);
- of_node_put(mcodec_ports);
return ret;
}
@@ -605,7 +589,6 @@ static int graph_parse_node_multi(struct simple_util_priv *priv,
{
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct device *dev = simple_priv_to_dev(priv);
- struct device_node *ep;
int ret = -ENOMEM;
int nm_idx = 0;
int nm_max = max(dai_link->num_cpus, dai_link->num_codecs);
@@ -640,12 +623,11 @@ static int graph_parse_node_multi(struct simple_util_priv *priv,
* };
* };
*/
- ep = graph_get_next_multi_ep(&port);
+ struct device_node *ep __free(device_node) = graph_get_next_multi_ep(&port, idx + 1);
if (!ep)
break;
ret = __graph_parse_node(priv, gtype, ep, li, is_cpu, idx);
- of_node_put(ep);
if (ret < 0)
goto multi_err;
@@ -669,12 +651,9 @@ static int graph_parse_node_single(struct simple_util_priv *priv,
struct device_node *port,
struct link_info *li, int is_cpu)
{
- struct device_node *ep = of_graph_get_next_port_endpoint(port, NULL);
- int ret = __graph_parse_node(priv, gtype, ep, li, is_cpu, 0);
+ struct device_node *ep __free(device_node) = of_graph_get_next_port_endpoint(port, NULL);
- of_node_put(ep);
-
- return ret;
+ return __graph_parse_node(priv, gtype, ep, li, is_cpu, 0);
}
static int graph_parse_node(struct simple_util_priv *priv,
@@ -688,8 +667,7 @@ static int graph_parse_node(struct simple_util_priv *priv,
return graph_parse_node_single(priv, gtype, port, li, is_cpu);
}
-static void graph_parse_daifmt(struct device_node *node,
- unsigned int *daifmt, unsigned int *bit_frame)
+static void graph_parse_daifmt(struct device_node *node, unsigned int *daifmt)
{
unsigned int fmt;
@@ -714,16 +692,6 @@ static void graph_parse_daifmt(struct device_node *node,
* };
*/
- /*
- * clock_provider:
- *
- * It can be judged it is provider
- * if (A) or (B) or (C) has bitclock-master / frame-master flag.
- *
- * use "or"
- */
- *bit_frame |= snd_soc_daifmt_parse_clock_provider_as_bitmap(node, NULL);
-
#define update_daifmt(name) \
if (!(*daifmt & SND_SOC_DAIFMT_##name##_MASK) && \
(fmt & SND_SOC_DAIFMT_##name##_MASK)) \
@@ -741,6 +709,17 @@ static void graph_parse_daifmt(struct device_node *node,
update_daifmt(INV);
}
+static unsigned int graph_parse_bitframe(struct device_node *ep)
+{
+ struct device_node *port __free(device_node) = ep_to_port(ep);
+ struct device_node *ports __free(device_node) = port_to_ports(port);
+
+ return snd_soc_daifmt_clock_provider_from_bitmap(
+ snd_soc_daifmt_parse_clock_provider_as_bitmap(ep, NULL) |
+ snd_soc_daifmt_parse_clock_provider_as_bitmap(port, NULL) |
+ snd_soc_daifmt_parse_clock_provider_as_bitmap(ports, NULL));
+}
+
static void graph_link_init(struct simple_util_priv *priv,
struct device_node *lnk,
struct device_node *port_cpu,
@@ -751,41 +730,44 @@ static void graph_link_init(struct simple_util_priv *priv,
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
struct device_node *ep_cpu, *ep_codec;
- struct device_node *ports_cpu, *ports_codec;
- unsigned int daifmt = 0, daiclk = 0;
+ struct device_node *multi_cpu_port = NULL, *multi_codec_port = NULL;
+ struct snd_soc_dai_link_component *dlc;
+ unsigned int daifmt = 0;
bool playback_only = 0, capture_only = 0;
enum snd_soc_trigger_order trigger_start = SND_SOC_TRIGGER_ORDER_DEFAULT;
enum snd_soc_trigger_order trigger_stop = SND_SOC_TRIGGER_ORDER_DEFAULT;
- unsigned int bit_frame = 0;
+ int multi_cpu_port_idx = 1, multi_codec_port_idx = 1;
+ int i;
of_node_get(port_cpu);
if (graph_lnk_is_multi(port_cpu)) {
- ep_cpu = graph_get_next_multi_ep(&port_cpu);
+ multi_cpu_port = port_cpu;
+ ep_cpu = graph_get_next_multi_ep(&multi_cpu_port, multi_cpu_port_idx++);
of_node_put(port_cpu);
port_cpu = ep_to_port(ep_cpu);
} else {
ep_cpu = of_graph_get_next_port_endpoint(port_cpu, NULL);
}
- ports_cpu = port_to_ports(port_cpu);
+ struct device_node *ports_cpu __free(device_node) = port_to_ports(port_cpu);
of_node_get(port_codec);
if (graph_lnk_is_multi(port_codec)) {
- ep_codec = graph_get_next_multi_ep(&port_codec);
+ multi_codec_port = port_codec;
+ ep_codec = graph_get_next_multi_ep(&multi_codec_port, multi_codec_port_idx++);
of_node_put(port_codec);
port_codec = ep_to_port(ep_codec);
} else {
ep_codec = of_graph_get_next_port_endpoint(port_codec, NULL);
}
- ports_codec = port_to_ports(port_codec);
-
+ struct device_node *ports_codec __free(device_node) = port_to_ports(port_codec);
- graph_parse_daifmt(ep_cpu, &daifmt, &bit_frame);
- graph_parse_daifmt(ep_codec, &daifmt, &bit_frame);
- graph_parse_daifmt(port_cpu, &daifmt, &bit_frame);
- graph_parse_daifmt(port_codec, &daifmt, &bit_frame);
- graph_parse_daifmt(ports_cpu, &daifmt, &bit_frame);
- graph_parse_daifmt(ports_codec, &daifmt, &bit_frame);
- graph_parse_daifmt(lnk, &daifmt, &bit_frame);
+ graph_parse_daifmt(ep_cpu, &daifmt);
+ graph_parse_daifmt(ep_codec, &daifmt);
+ graph_parse_daifmt(port_cpu, &daifmt);
+ graph_parse_daifmt(port_codec, &daifmt);
+ graph_parse_daifmt(ports_cpu, &daifmt);
+ graph_parse_daifmt(ports_codec, &daifmt);
+ graph_parse_daifmt(lnk, &daifmt);
graph_util_parse_link_direction(lnk, &playback_only, &capture_only);
graph_util_parse_link_direction(ports_cpu, &playback_only, &capture_only);
@@ -811,14 +793,21 @@ static void graph_link_init(struct simple_util_priv *priv,
graph_util_parse_trigger_order(priv, ep_cpu, &trigger_start, &trigger_stop);
graph_util_parse_trigger_order(priv, ep_codec, &trigger_start, &trigger_stop);
- /*
- * convert bit_frame
- * We need to flip clock_provider if it was CPU node,
- * because it is Codec base.
- */
- daiclk = snd_soc_daifmt_clock_provider_from_bitmap(bit_frame);
- if (is_cpu_node)
- daiclk = snd_soc_daifmt_clock_provider_flipped(daiclk);
+ for_each_link_cpus(dai_link, i, dlc) {
+ dlc->ext_fmt = graph_parse_bitframe(ep_cpu);
+
+ if (multi_cpu_port)
+ ep_cpu = graph_get_next_multi_ep(&multi_cpu_port, multi_cpu_port_idx++);
+ }
+
+ for_each_link_codecs(dai_link, i, dlc) {
+ dlc->ext_fmt = graph_parse_bitframe(ep_codec);
+
+ if (multi_codec_port)
+ ep_codec = graph_get_next_multi_ep(&multi_codec_port, multi_codec_port_idx++);
+ }
+
+ /*** Don't use port_cpu / port_codec after here ***/
dai_link->playback_only = playback_only;
dai_link->capture_only = capture_only;
@@ -826,14 +815,12 @@ static void graph_link_init(struct simple_util_priv *priv,
dai_link->trigger_start = trigger_start;
dai_link->trigger_stop = trigger_stop;
- dai_link->dai_fmt = daifmt | daiclk;
+ dai_link->dai_fmt = daifmt;
dai_link->init = simple_util_dai_init;
dai_link->ops = &graph_ops;
if (priv->ops)
dai_link->ops = priv->ops;
- of_node_put(ports_cpu);
- of_node_put(ports_codec);
of_node_put(port_cpu);
of_node_put(port_codec);
of_node_put(ep_cpu);
@@ -845,8 +832,8 @@ int audio_graph2_link_normal(struct simple_util_priv *priv,
struct link_info *li)
{
struct device_node *cpu_port = lnk;
- struct device_node *cpu_ep = of_graph_get_next_port_endpoint(cpu_port, NULL);
- struct device_node *codec_port = of_graph_get_remote_port(cpu_ep);
+ struct device_node *cpu_ep __free(device_node) = of_graph_get_next_port_endpoint(cpu_port, NULL);
+ struct device_node *codec_port __free(device_node) = of_graph_get_remote_port(cpu_ep);
int ret;
/*
@@ -856,19 +843,16 @@ int audio_graph2_link_normal(struct simple_util_priv *priv,
*/
ret = graph_parse_node(priv, GRAPH_NORMAL, codec_port, li, 0);
if (ret < 0)
- goto err;
+ return ret;
/*
* call CPU, and set DAI Name
*/
ret = graph_parse_node(priv, GRAPH_NORMAL, cpu_port, li, 1);
if (ret < 0)
- goto err;
+ return ret;
graph_link_init(priv, lnk, cpu_port, codec_port, li, 1);
-err:
- of_node_put(codec_port);
- of_node_put(cpu_ep);
return ret;
}
@@ -878,8 +862,8 @@ int audio_graph2_link_dpcm(struct simple_util_priv *priv,
struct device_node *lnk,
struct link_info *li)
{
- struct device_node *ep = of_graph_get_next_port_endpoint(lnk, NULL);
- struct device_node *rep = of_graph_get_remote_endpoint(ep);
+ struct device_node *ep __free(device_node) = of_graph_get_next_port_endpoint(lnk, NULL);
+ struct device_node *rep __free(device_node) = of_graph_get_remote_endpoint(ep);
struct device_node *cpu_port = NULL;
struct device_node *codec_port = NULL;
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
@@ -963,8 +947,6 @@ int audio_graph2_link_dpcm(struct simple_util_priv *priv,
graph_link_init(priv, lnk, cpu_port, codec_port, li, is_cpu);
err:
- of_node_put(ep);
- of_node_put(rep);
of_node_put(cpu_port);
of_node_put(codec_port);
@@ -977,9 +959,9 @@ int audio_graph2_link_c2c(struct simple_util_priv *priv,
struct link_info *li)
{
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
- struct device_node *port0, *port1, *ports;
- struct device_node *codec0_port, *codec1_port;
- struct device_node *ep0, *ep1;
+ struct device_node *port0 = lnk;
+ struct device_node *ports __free(device_node) = port_to_ports(port0);
+ struct device_node *port1 __free(device_node) = of_graph_get_next_port(ports, port0);
u32 val = 0;
int ret = -EINVAL;
@@ -999,10 +981,6 @@ int audio_graph2_link_c2c(struct simple_util_priv *priv,
* };
* };
*/
- of_node_get(lnk);
- port0 = lnk;
- ports = port_to_ports(port0);
- port1 = of_graph_get_next_port(ports, port0);
/*
* Card2 can use original Codec2Codec settings if DT has.
@@ -1019,7 +997,7 @@ int audio_graph2_link_c2c(struct simple_util_priv *priv,
c2c_conf = devm_kzalloc(dev, sizeof(*c2c_conf), GFP_KERNEL);
if (!c2c_conf)
- goto err1;
+ return ret;
c2c_conf->formats = SNDRV_PCM_FMTBIT_S32_LE; /* update ME */
c2c_conf->rates = SNDRV_PCM_RATE_8000_384000;
@@ -1032,11 +1010,11 @@ int audio_graph2_link_c2c(struct simple_util_priv *priv,
dai_link->num_c2c_params = 1;
}
- ep0 = of_graph_get_next_port_endpoint(port0, NULL);
- ep1 = of_graph_get_next_port_endpoint(port1, NULL);
+ struct device_node *ep0 __free(device_node) = of_graph_get_next_port_endpoint(port0, NULL);
+ struct device_node *ep1 __free(device_node) = of_graph_get_next_port_endpoint(port1, NULL);
- codec0_port = of_graph_get_remote_port(ep0);
- codec1_port = of_graph_get_remote_port(ep1);
+ struct device_node *codec0_port __free(device_node) = of_graph_get_remote_port(ep0);
+ struct device_node *codec1_port __free(device_node) = of_graph_get_remote_port(ep1);
/*
* call Codec first.
@@ -1045,25 +1023,16 @@ int audio_graph2_link_c2c(struct simple_util_priv *priv,
*/
ret = graph_parse_node(priv, GRAPH_C2C, codec1_port, li, 0);
if (ret < 0)
- goto err2;
+ return ret;
/*
* call CPU, and set DAI Name
*/
ret = graph_parse_node(priv, GRAPH_C2C, codec0_port, li, 1);
if (ret < 0)
- goto err2;
+ return ret;
graph_link_init(priv, lnk, codec0_port, codec1_port, li, 1);
-err2:
- of_node_put(ep0);
- of_node_put(ep1);
- of_node_put(codec0_port);
- of_node_put(codec1_port);
-err1:
- of_node_put(ports);
- of_node_put(port0);
- of_node_put(port1);
return ret;
}
@@ -1153,8 +1122,8 @@ static int graph_count_normal(struct simple_util_priv *priv,
struct link_info *li)
{
struct device_node *cpu_port = lnk;
- struct device_node *cpu_ep = of_graph_get_next_port_endpoint(cpu_port, NULL);
- struct device_node *codec_port = of_graph_get_remote_port(cpu_ep);
+ struct device_node *cpu_ep __free(device_node) = of_graph_get_next_port_endpoint(cpu_port, NULL);
+ struct device_node *codec_port __free(device_node) = of_graph_get_remote_port(cpu_ep);
/*
* CPU {
@@ -1171,9 +1140,6 @@ static int graph_count_normal(struct simple_util_priv *priv,
li->num[li->link].codecs = graph_counter(codec_port);
- of_node_put(cpu_ep);
- of_node_put(codec_port);
-
return 0;
}
@@ -1181,8 +1147,8 @@ static int graph_count_dpcm(struct simple_util_priv *priv,
struct device_node *lnk,
struct link_info *li)
{
- struct device_node *ep = of_graph_get_next_port_endpoint(lnk, NULL);
- struct device_node *rport = of_graph_get_remote_port(ep);
+ struct device_node *ep __free(device_node) = of_graph_get_next_port_endpoint(lnk, NULL);
+ struct device_node *rport __free(device_node) = of_graph_get_remote_port(ep);
/*
* dpcm {
@@ -1211,9 +1177,6 @@ static int graph_count_dpcm(struct simple_util_priv *priv,
li->num[li->link].codecs = graph_counter(rport); /* BE */
}
- of_node_put(ep);
- of_node_put(rport);
-
return 0;
}
@@ -1221,13 +1184,13 @@ static int graph_count_c2c(struct simple_util_priv *priv,
struct device_node *lnk,
struct link_info *li)
{
- struct device_node *ports = port_to_ports(lnk);
- struct device_node *port0 = lnk;
- struct device_node *port1 = of_graph_get_next_port(ports, of_node_get(port0));
- struct device_node *ep0 = of_graph_get_next_port_endpoint(port0, NULL);
- struct device_node *ep1 = of_graph_get_next_port_endpoint(port1, NULL);
- struct device_node *codec0 = of_graph_get_remote_port(ep0);
- struct device_node *codec1 = of_graph_get_remote_port(ep1);
+ struct device_node *ports __free(device_node) = port_to_ports(lnk);
+ struct device_node *port0 = of_node_get(lnk);
+ struct device_node *port1 = of_node_get(of_graph_get_next_port(ports, of_node_get(port0)));
+ struct device_node *ep0 __free(device_node) = of_graph_get_next_port_endpoint(port0, NULL);
+ struct device_node *ep1 __free(device_node) = of_graph_get_next_port_endpoint(port1, NULL);
+ struct device_node *codec0 __free(device_node) = of_graph_get_remote_port(ep0);
+ struct device_node *codec1 __free(device_node) = of_graph_get_remote_port(ep1);
/*
* codec2codec {
@@ -1247,13 +1210,6 @@ static int graph_count_c2c(struct simple_util_priv *priv,
li->num[li->link].codecs = graph_counter(codec1);
- of_node_put(ports);
- of_node_put(port1);
- of_node_put(ep0);
- of_node_put(ep1);
- of_node_put(codec0);
- of_node_put(codec1);
-
return 0;
}
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index fefa67dd132b..dd414634b4ac 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -139,10 +139,9 @@ int simple_util_parse_tdm_width_map(struct device *dev, struct device_node *np,
int n, i, ret;
u32 *p;
- if (!of_property_read_bool(np, "dai-tdm-slot-width-map"))
- return 0;
-
n = of_property_count_elems_of_size(np, "dai-tdm-slot-width-map", sizeof(u32));
+ if (n <= 0)
+ return 0;
if (n % 3) {
dev_err(dev, "Invalid number of cells for dai-tdm-slot-width-map\n");
return -EINVAL;
@@ -365,8 +364,7 @@ void simple_util_shutdown(struct snd_pcm_substream *substream)
struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, i);
if (props->mclk_fs && !dai->clk_fixed && !snd_soc_dai_active(cpu_dai))
- snd_soc_dai_set_sysclk(cpu_dai,
- 0, 0, SND_SOC_CLOCK_OUT);
+ snd_soc_dai_set_sysclk(cpu_dai, 0, 0, dai->clk_direction);
simple_clk_disable(dai);
}
@@ -374,8 +372,7 @@ void simple_util_shutdown(struct snd_pcm_substream *substream)
struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, i);
if (props->mclk_fs && !dai->clk_fixed && !snd_soc_dai_active(codec_dai))
- snd_soc_dai_set_sysclk(codec_dai,
- 0, 0, SND_SOC_CLOCK_IN);
+ snd_soc_dai_set_sysclk(codec_dai, 0, 0, dai->clk_direction);
simple_clk_disable(dai);
}
@@ -483,13 +480,15 @@ int simple_util_hw_params(struct snd_pcm_substream *substream,
}
for_each_rtd_codec_dais(rtd, i, sdai) {
- ret = snd_soc_dai_set_sysclk(sdai, 0, mclk, SND_SOC_CLOCK_IN);
+ pdai = simple_props_to_dai_codec(props, i);
+ ret = snd_soc_dai_set_sysclk(sdai, 0, mclk, pdai->clk_direction);
if (ret && ret != -ENOTSUPP)
return ret;
}
for_each_rtd_cpu_dais(rtd, i, sdai) {
- ret = snd_soc_dai_set_sysclk(sdai, 0, mclk, SND_SOC_CLOCK_OUT);
+ pdai = simple_props_to_dai_cpu(props, i);
+ ret = snd_soc_dai_set_sysclk(sdai, 0, mclk, pdai->clk_direction);
if (ret && ret != -ENOTSUPP)
return ret;
}
@@ -713,7 +712,7 @@ int simple_util_parse_routing(struct snd_soc_card *card,
snprintf(prop, sizeof(prop), "%s%s", prefix, "routing");
- if (!of_property_read_bool(node, prop))
+ if (!of_property_present(node, prop))
return 0;
return snd_soc_of_parse_audio_routing(card, prop);
@@ -731,7 +730,7 @@ int simple_util_parse_widgets(struct snd_soc_card *card,
snprintf(prop, sizeof(prop), "%s%s", prefix, "widgets");
- if (of_property_read_bool(node, prop))
+ if (of_property_present(node, prop))
return snd_soc_of_parse_audio_simple_widgets(card, prop);
/* no widgets is not error */
@@ -1005,36 +1004,27 @@ EXPORT_SYMBOL_GPL(graph_util_card_probe);
int graph_util_is_ports0(struct device_node *np)
{
- struct device_node *port, *ports, *ports0, *top;
- int ret;
+ struct device_node *parent __free(device_node) = of_get_parent(np);
+ struct device_node *port;
/* np is "endpoint" or "port" */
- if (of_node_name_eq(np, "endpoint")) {
- port = of_get_parent(np);
- } else {
+ if (of_node_name_eq(np, "endpoint"))
+ port = parent;
+ else
port = np;
- of_node_get(port);
- }
-
- ports = of_get_parent(port);
- top = of_get_parent(ports);
- ports0 = of_get_child_by_name(top, "ports");
-
- ret = ports0 == ports;
- of_node_put(port);
- of_node_put(ports);
- of_node_put(ports0);
- of_node_put(top);
+ struct device_node *ports __free(device_node) = of_get_parent(port);
+ struct device_node *top __free(device_node) = of_get_parent(ports);
+ struct device_node *ports0 __free(device_node) = of_get_child_by_name(top, "ports");
- return ret;
+ return ports0 == ports;
}
EXPORT_SYMBOL_GPL(graph_util_is_ports0);
static int graph_get_dai_id(struct device_node *ep)
{
- struct device_node *node;
- struct device_node *endpoint;
+ struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
+ struct device_node *port __free(device_node) = of_get_parent(ep);
struct of_endpoint info;
int i, id;
int ret;
@@ -1053,16 +1043,16 @@ static int graph_get_dai_id(struct device_node *ep)
* only of_graph_parse_endpoint().
* We need to check "reg" property
*/
- if (of_property_present(ep, "reg"))
- return info.id;
- node = of_get_parent(ep);
- ret = of_property_present(node, "reg");
- of_node_put(node);
+ /* check port first */
+ ret = of_property_present(port, "reg");
if (ret)
return info.port;
+
+ /* check endpoint 2nd as backup */
+ if (of_property_present(ep, "reg"))
+ return info.id;
}
- node = of_graph_get_port_parent(ep);
/*
* Non HDMI sound case, counting port/endpoint on its DT
@@ -1070,14 +1060,14 @@ static int graph_get_dai_id(struct device_node *ep)
*/
i = 0;
id = -1;
- for_each_endpoint_of_node(node, endpoint) {
- if (endpoint == ep)
+ for_each_of_graph_port(node, p) {
+ if (port == p) {
id = i;
+ break;
+ }
i++;
}
- of_node_put(node);
-
if (id < 0)
return -ENODEV;
@@ -1087,7 +1077,6 @@ static int graph_get_dai_id(struct device_node *ep)
int graph_util_parse_dai(struct device *dev, struct device_node *ep,
struct snd_soc_dai_link_component *dlc, int *is_single_link)
{
- struct device_node *node;
struct of_phandle_args args = {};
struct snd_soc_dai *dai;
int ret;
@@ -1095,7 +1084,7 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep,
if (!ep)
return 0;
- node = of_graph_get_port_parent(ep);
+ struct device_node *node __free(device_node) = of_graph_get_port_parent(ep);
/*
* Try to find from DAI node
@@ -1136,10 +1125,8 @@ int graph_util_parse_dai(struct device *dev, struct device_node *ep,
* if he unbinded CPU or Codec.
*/
ret = snd_soc_get_dlc(&args, dlc);
- if (ret < 0) {
- of_node_put(node);
+ if (ret < 0)
return ret;
- }
parse_dai_end:
if (is_single_link)
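
The conversions in simple-card-utils.c lean on the kernel's scope-based cleanup support: a device_node pointer declared with __free(device_node) has of_node_put() called on it automatically when it leaves scope, which is why the explicit put calls and goto-style unwinding can be deleted. A minimal sketch of the pattern, illustrative only and not part of the patch (function and property names are hypothetical):

#include <linux/cleanup.h>
#include <linux/of.h>

/* Return true if the parent of @np carries a "reg" property. The
 * __free(device_node) annotation drops the parent reference on every
 * return path, so no of_node_put() or unwind label is needed.
 */
static bool example_parent_has_reg(struct device_node *np)
{
	struct device_node *parent __free(device_node) = of_get_parent(np);

	if (!parent)
		return false;

	return of_property_present(parent, "reg");
}
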
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 76a1d05e2ebe..afe7e79ffdbd 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -120,14 +120,12 @@ static void simple_parse_convert(struct device *dev,
struct simple_util_data *adata)
{
struct device_node *top = dev->of_node;
- struct device_node *node = of_get_parent(np);
+ struct device_node *node __free(device_node) = of_get_parent(np);
simple_util_parse_convert(top, PREFIX, adata);
simple_util_parse_convert(node, PREFIX, adata);
simple_util_parse_convert(node, NULL, adata);
simple_util_parse_convert(np, NULL, adata);
-
- of_node_put(node);
}
static int simple_parse_node(struct simple_util_priv *priv,
@@ -176,7 +174,7 @@ static int simple_link_init(struct simple_util_priv *priv,
struct device_node *top = dev->of_node;
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
- struct device_node *node = of_get_parent(cpu);
+ struct device_node *node __free(device_node) = of_get_parent(cpu);
enum snd_soc_trigger_order trigger_start = SND_SOC_TRIGGER_ORDER_DEFAULT;
enum snd_soc_trigger_order trigger_stop = SND_SOC_TRIGGER_ORDER_DEFAULT;
bool playback_only = 0, capture_only = 0;
@@ -185,7 +183,7 @@ static int simple_link_init(struct simple_util_priv *priv,
ret = simple_util_parse_daifmt(dev, node, codec,
prefix, &dai_link->dai_fmt);
if (ret < 0)
- goto init_end;
+ return ret;
graph_util_parse_link_direction(top, &playback_only, &capture_only);
graph_util_parse_link_direction(node, &playback_only, &capture_only);
@@ -215,11 +213,7 @@ static int simple_link_init(struct simple_util_priv *priv,
dai_link->init = simple_util_dai_init;
dai_link->ops = &simple_ops;
- ret = simple_util_set_dailink_name(dev, dai_link, name);
-init_end:
- of_node_put(node);
-
- return ret;
+ return simple_util_set_dailink_name(dev, dai_link, name);
}
static int simple_dai_link_of_dpcm(struct simple_util_priv *priv,
@@ -232,7 +226,7 @@ static int simple_dai_link_of_dpcm(struct simple_util_priv *priv,
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link);
struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link);
struct device_node *top = dev->of_node;
- struct device_node *node = of_get_parent(np);
+ struct device_node *node __free(device_node) = of_get_parent(np);
char *prefix = "";
char dai_name[64];
int ret;
@@ -296,7 +290,6 @@ static int simple_dai_link_of_dpcm(struct simple_util_priv *priv,
out_put_node:
li->link++;
- of_node_put(node);
return ret;
}
@@ -312,15 +305,13 @@ static int simple_dai_link_of(struct simple_util_priv *priv,
struct snd_soc_dai_link_component *codecs = snd_soc_link_to_codec(dai_link, 0);
struct snd_soc_dai_link_component *platforms = snd_soc_link_to_platform(dai_link, 0);
struct device_node *cpu = NULL;
- struct device_node *node = NULL;
- struct device_node *plat = NULL;
char dai_name[64];
char prop[128];
char *prefix = "";
int ret, single_cpu = 0;
cpu = np;
- node = of_get_parent(np);
+ struct device_node *node __free(device_node) = of_get_parent(np);
dev_dbg(dev, "link_of (%pOF)\n", node);
@@ -329,7 +320,7 @@ static int simple_dai_link_of(struct simple_util_priv *priv,
prefix = PREFIX;
snprintf(prop, sizeof(prop), "%splat", prefix);
- plat = of_get_child_by_name(node, prop);
+ struct device_node *plat __free(device_node) = of_get_child_by_name(node, prop);
ret = simple_parse_node(priv, cpu, li, prefix, &single_cpu);
if (ret < 0)
@@ -352,9 +343,6 @@ static int simple_dai_link_of(struct simple_util_priv *priv,
ret = simple_link_init(priv, cpu, codec, li, prefix, dai_name);
dai_link_of_err:
- of_node_put(plat);
- of_node_put(node);
-
li->link++;
return ret;
@@ -374,7 +362,6 @@ static int __simple_for_each_link(struct simple_util_priv *priv,
struct device *dev = simple_priv_to_dev(priv);
struct device_node *top = dev->of_node;
struct device_node *node;
- struct device_node *add_devs;
uintptr_t dpcm_selectable = (uintptr_t)of_device_get_match_data(dev);
bool is_top = 0;
int ret = 0;
@@ -386,14 +373,11 @@ static int __simple_for_each_link(struct simple_util_priv *priv,
is_top = 1;
}
- add_devs = of_get_child_by_name(top, PREFIX "additional-devs");
+ struct device_node *add_devs __free(device_node) = of_get_child_by_name(top, PREFIX "additional-devs");
/* loop for all dai-link */
do {
struct simple_util_data adata;
- struct device_node *codec;
- struct device_node *plat;
- struct device_node *np;
int num = of_get_child_count(node);
/* Skip additional-devs node */
@@ -403,26 +387,26 @@ static int __simple_for_each_link(struct simple_util_priv *priv,
}
/* get codec */
- codec = of_get_child_by_name(node, is_top ?
- PREFIX "codec" : "codec");
+ struct device_node *codec __free(device_node) =
+ of_get_child_by_name(node, is_top ? PREFIX "codec" : "codec");
if (!codec) {
ret = -ENODEV;
goto error;
}
/* get platform */
- plat = of_get_child_by_name(node, is_top ?
- PREFIX "plat" : "plat");
+ struct device_node *plat __free(device_node) =
+ of_get_child_by_name(node, is_top ? PREFIX "plat" : "plat");
/* get convert-xxx property */
memset(&adata, 0, sizeof(adata));
- for_each_child_of_node(node, np) {
+ for_each_child_of_node_scoped(node, np) {
if (np == add_devs)
continue;
simple_parse_convert(dev, np, &adata);
}
/* loop for all CPU/Codec node */
- for_each_child_of_node(node, np) {
+ for_each_child_of_node_scoped(node, np) {
if (plat == np || add_devs == np)
continue;
/*
@@ -452,22 +436,16 @@ static int __simple_for_each_link(struct simple_util_priv *priv,
ret = func_noml(priv, np, codec, li, is_top);
}
- if (ret < 0) {
- of_node_put(codec);
- of_node_put(plat);
- of_node_put(np);
+ if (ret < 0)
goto error;
- }
}
- of_node_put(codec);
- of_node_put(plat);
node = of_get_next_child(top, node);
} while (!is_top && node);
error:
- of_node_put(add_devs);
of_node_put(node);
+
return ret;
}
@@ -514,15 +492,13 @@ static void simple_depopulate_aux(void *data)
static int simple_populate_aux(struct simple_util_priv *priv)
{
struct device *dev = simple_priv_to_dev(priv);
- struct device_node *node;
+ struct device_node *node __free(device_node) = of_get_child_by_name(dev->of_node, PREFIX "additional-devs");
int ret;
- node = of_get_child_by_name(dev->of_node, PREFIX "additional-devs");
if (!node)
return 0;
ret = of_platform_populate(node, NULL, NULL, dev);
- of_node_put(node);
if (ret)
return ret;
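
simple-card.c also switches its child-node loops to for_each_child_of_node_scoped(), which holds each child's reference only for the duration of one iteration and releases it automatically, even on break or an early return; that is what lets the of_node_put(np) call disappear from the error path above. A short sketch of the idiom, illustrative only (names are hypothetical):

#include <linux/of.h>

/* Count children of @node that reference a DAI. The scoped iterator puts
 * each child reference automatically, including when the loop exits early.
 */
static int example_count_dai_children(struct device_node *node)
{
	int count = 0;

	for_each_child_of_node_scoped(node, child) {
		if (of_property_present(child, "sound-dai"))
			count++;
	}

	return count;
}
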
diff --git a/sound/soc/intel/avs/apl.c b/sound/soc/intel/avs/apl.c
index a48d74daf48c..3dccf0a57a3a 100644
--- a/sound/soc/intel/avs/apl.c
+++ b/sound/soc/intel/avs/apl.c
@@ -126,7 +126,7 @@ int avs_apl_coredump(struct avs_dev *adev, union avs_notify_msg *msg)
struct avs_apl_log_buffer_layout layout;
void __iomem *addr, *buf;
size_t dump_size;
- u16 offset = 0;
+ u32 offset = 0;
u8 *dump, *pos;
dump_size = AVS_FW_REGS_SIZE + msg->ext.coredump.stack_dump_size;
diff --git a/sound/soc/intel/avs/core.c b/sound/soc/intel/avs/core.c
index 73d4bde9b2f7..0e750e9e01d9 100644
--- a/sound/soc/intel/avs/core.c
+++ b/sound/soc/intel/avs/core.c
@@ -829,10 +829,10 @@ static const struct avs_spec jsl_desc = {
.hipc = &cnl_hipc_spec,
};
-#define AVS_TGL_BASED_SPEC(sname) \
+#define AVS_TGL_BASED_SPEC(sname, min) \
static const struct avs_spec sname##_desc = { \
.name = #sname, \
- .min_fw_version = { 10, 29, 0, 5646 }, \
+ .min_fw_version = { 10, min, 0, 5646 }, \
.dsp_ops = &avs_tgl_dsp_ops, \
.core_init_mask = 1, \
.attributes = AVS_PLATATTR_IMR, \
@@ -840,11 +840,11 @@ static const struct avs_spec sname##_desc = { \
.hipc = &cnl_hipc_spec, \
}
-AVS_TGL_BASED_SPEC(lkf);
-AVS_TGL_BASED_SPEC(tgl);
-AVS_TGL_BASED_SPEC(ehl);
-AVS_TGL_BASED_SPEC(adl);
-AVS_TGL_BASED_SPEC(adl_n);
+AVS_TGL_BASED_SPEC(lkf, 28);
+AVS_TGL_BASED_SPEC(tgl, 29);
+AVS_TGL_BASED_SPEC(ehl, 30);
+AVS_TGL_BASED_SPEC(adl, 35);
+AVS_TGL_BASED_SPEC(adl_n, 35);
static const struct pci_device_id avs_ids[] = {
{ PCI_DEVICE_DATA(INTEL, HDA_SKL_LP, &skl_desc) },
@@ -902,3 +902,13 @@ MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("intel/skl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/apl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/cnl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/icl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/jsl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/lkf/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/tgl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/ehl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/adl/dsp_basefw.bin");
+MODULE_FIRMWARE("intel/adl_n/dsp_basefw.bin");
diff --git a/sound/soc/intel/avs/debugfs.c b/sound/soc/intel/avs/debugfs.c
index 1767ded4d983..8c4edda97f75 100644
--- a/sound/soc/intel/avs/debugfs.c
+++ b/sound/soc/intel/avs/debugfs.c
@@ -10,6 +10,7 @@
#include <linux/kfifo.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
+#include <linux/string_helpers.h>
#include <sound/soc.h>
#include "avs.h"
#include "messages.h"
diff --git a/sound/soc/intel/avs/ipc.c b/sound/soc/intel/avs/ipc.c
index 4fba46e77c47..08ed9d96738a 100644
--- a/sound/soc/intel/avs/ipc.c
+++ b/sound/soc/intel/avs/ipc.c
@@ -184,10 +184,11 @@ static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
{
struct avs_ipc *ipc = adev->ipc;
union avs_reply_msg msg = AVS_MSG(header);
- u64 reg;
+ u32 sts, lec;
- reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
- trace_avs_ipc_reply_msg(header, reg);
+ sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
+ lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
+ trace_avs_ipc_reply_msg(header, sts, lec);
ipc->rx.header = header;
/* Abort copying payload if request processing was unsuccessful. */
@@ -209,10 +210,11 @@ static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
union avs_notify_msg msg = AVS_MSG(header);
size_t data_size = 0;
void *data = NULL;
- u64 reg;
+ u32 sts, lec;
- reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
- trace_avs_ipc_notify_msg(header, reg);
+ sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
+ lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
+ trace_avs_ipc_notify_msg(header, sts, lec);
/* Ignore spurious notifications until handshake is established. */
if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
@@ -367,13 +369,16 @@ static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx, bool read_fwregs)
{
const struct avs_spec *const spec = adev->spec;
- u64 reg = ULONG_MAX;
+ u32 sts = UINT_MAX;
+ u32 lec = UINT_MAX;
tx->header |= spec->hipc->req_busy_mask;
- if (read_fwregs)
- reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
+ if (read_fwregs) {
+ sts = snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev));
+ lec = snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev));
+ }
- trace_avs_request(tx, reg);
+ trace_avs_request(tx, sts, lec);
if (tx->size)
memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
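
The IPC tracing rework above stops reading the firmware register window as one 64-bit readq() and instead reads the 32-bit status and last-error-code words separately; per registers.h they sit at offsets 0x0 and 0x4 of the shared SRAM window. A rough sketch of the idea using plain readl() rather than the driver's snd_hdac_adsp_readl() accessor (illustrative only):

#include <linux/io.h>
#include <linux/types.h>

/* @fw_regs is assumed to be the ioremapped base of the firmware register
 * window; status and last error code are read as two 32-bit words.
 */
static void example_read_fw_regs(void __iomem *fw_regs, u32 *sts, u32 *lec)
{
	*sts = readl(fw_regs + 0x0);	/* firmware status */
	*lec = readl(fw_regs + 0x4);	/* last error code */
}
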
diff --git a/sound/soc/intel/avs/loader.c b/sound/soc/intel/avs/loader.c
index 890efd2f1fea..9ff7818395cd 100644
--- a/sound/soc/intel/avs/loader.c
+++ b/sound/soc/intel/avs/loader.c
@@ -167,7 +167,8 @@ int avs_cldma_load_basefw(struct avs_dev *adev, struct firmware *fw)
(reg & AVS_ROM_INIT_DONE) == AVS_ROM_INIT_DONE,
AVS_ROM_INIT_POLLING_US, SKL_ROM_INIT_TIMEOUT_US);
if (ret < 0) {
- dev_err(adev->dev, "rom init timeout: %d\n", ret);
+ dev_err(adev->dev, "rom init failed: %d, status: 0x%08x, lec: 0x%08x\n",
+ ret, reg, snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
return ret;
}
@@ -180,7 +181,8 @@ int avs_cldma_load_basefw(struct avs_dev *adev, struct firmware *fw)
AVS_FW_INIT_POLLING_US, AVS_FW_INIT_TIMEOUT_US);
hda_cldma_stop(cl);
if (ret < 0) {
- dev_err(adev->dev, "transfer fw failed: %d\n", ret);
+ dev_err(adev->dev, "transfer fw failed: %d, status: 0x%08x, lec: 0x%08x\n",
+ ret, reg, snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
return ret;
}
@@ -308,12 +310,13 @@ avs_hda_init_rom(struct avs_dev *adev, unsigned int dma_id, bool purge)
}
/* await ROM init */
- ret = snd_hdac_adsp_readq_poll(adev, spec->sram->rom_status_offset, reg,
+ ret = snd_hdac_adsp_readl_poll(adev, spec->sram->rom_status_offset, reg,
(reg & 0xF) == AVS_ROM_INIT_DONE ||
(reg & 0xF) == APL_ROM_FW_ENTERED,
AVS_ROM_INIT_POLLING_US, APL_ROM_INIT_TIMEOUT_US);
if (ret < 0) {
- dev_err(adev->dev, "rom init timeout: %d\n", ret);
+ dev_err(adev->dev, "rom init failed: %d, status: 0x%08x, lec: 0x%08x\n",
+ ret, reg, snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
goto err;
}
@@ -337,15 +340,15 @@ static int avs_imr_load_basefw(struct avs_dev *adev)
/* DMA id ignored when flashing from IMR as no transfer occurs. */
ret = avs_hda_init_rom(adev, 0, false);
- if (ret < 0) {
- dev_err(adev->dev, "rom init failed: %d\n", ret);
+ if (ret < 0)
return ret;
- }
ret = wait_for_completion_timeout(&adev->fw_ready,
msecs_to_jiffies(AVS_FW_INIT_TIMEOUT_MS));
if (!ret) {
- dev_err(adev->dev, "firmware ready timeout\n");
+ dev_err(adev->dev, "firmware ready timeout, status: 0x%08x, lec: 0x%08x\n",
+ snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev)),
+ snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
return -ETIMEDOUT;
}
@@ -392,7 +395,7 @@ int avs_hda_load_basefw(struct avs_dev *adev, struct firmware *fw)
ret = avs_hda_init_rom(adev, dma_id, true);
if (!ret)
break;
- dev_info(adev->dev, "#%d rom init fail: %d\n", i + 1, ret);
+ dev_info(adev->dev, "#%d rom init failed: %d\n", i + 1, ret);
}
if (ret < 0)
goto cleanup_resources;
@@ -404,7 +407,8 @@ int avs_hda_load_basefw(struct avs_dev *adev, struct firmware *fw)
AVS_FW_INIT_POLLING_US, AVS_FW_INIT_TIMEOUT_US);
snd_hdac_dsp_trigger(hstream, false);
if (ret < 0) {
- dev_err(adev->dev, "transfer fw failed: %d\n", ret);
+ dev_err(adev->dev, "transfer fw failed: %d, status: 0x%08x, lec: 0x%08x\n",
+ ret, reg, snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
}
@@ -584,7 +588,9 @@ static int avs_dsp_load_basefw(struct avs_dev *adev)
ret = wait_for_completion_timeout(&adev->fw_ready,
msecs_to_jiffies(AVS_FW_INIT_TIMEOUT_MS));
if (!ret) {
- dev_err(adev->dev, "firmware ready timeout\n");
+ dev_err(adev->dev, "firmware ready timeout, status: 0x%08x, lec: 0x%08x\n",
+ snd_hdac_adsp_readl(adev, AVS_FW_REG_STATUS(adev)),
+ snd_hdac_adsp_readl(adev, AVS_FW_REG_ERROR(adev)));
avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
ret = -ETIMEDOUT;
goto release_fw;
@@ -675,16 +681,12 @@ int avs_dsp_first_boot_firmware(struct avs_dev *adev)
}
ret = avs_ipc_get_hw_config(adev, &adev->hw_cfg);
- if (ret) {
- dev_err(adev->dev, "get hw cfg failed: %d\n", ret);
+ if (ret)
return AVS_IPC_RET(ret);
- }
ret = avs_ipc_get_fw_config(adev, &adev->fw_cfg);
- if (ret) {
- dev_err(adev->dev, "get fw cfg failed: %d\n", ret);
+ if (ret)
return AVS_IPC_RET(ret);
- }
adev->core_refs = devm_kcalloc(adev->dev, adev->hw_cfg.dsp_cores,
sizeof(*adev->core_refs), GFP_KERNEL);
diff --git a/sound/soc/intel/avs/messages.c b/sound/soc/intel/avs/messages.c
index ec458bd51b10..30b666f8909b 100644
--- a/sound/soc/intel/avs/messages.c
+++ b/sound/soc/intel/avs/messages.c
@@ -400,10 +400,12 @@ int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg)
AVS_BASEFW_FIRMWARE_CONFIG, NULL, 0,
&payload, &payload_size);
if (ret)
- return ret;
+ goto err;
/* Non-zero payload expected for FIRMWARE_CONFIG. */
- if (!payload_size)
- return -EREMOTEIO;
+ if (!payload_size) {
+ ret = -EREMOTEIO;
+ goto err;
+ }
while (offset < payload_size) {
tlv = (struct avs_tlv *)(payload + offset);
@@ -502,6 +504,9 @@ int avs_ipc_get_fw_config(struct avs_dev *adev, struct avs_fw_cfg *cfg)
/* No longer needed, free it as it's owned by the get_large_config() caller. */
kfree(payload);
+err:
+ if (ret)
+ dev_err(adev->dev, "get fw cfg failed: %d\n", ret);
return ret;
}
@@ -517,10 +522,12 @@ int avs_ipc_get_hw_config(struct avs_dev *adev, struct avs_hw_cfg *cfg)
AVS_BASEFW_HARDWARE_CONFIG, NULL, 0,
&payload, &payload_size);
if (ret)
- return ret;
+ goto err;
/* Non-zero payload expected for HARDWARE_CONFIG. */
- if (!payload_size)
- return -EREMOTEIO;
+ if (!payload_size) {
+ ret = -EREMOTEIO;
+ goto err;
+ }
while (offset < payload_size) {
tlv = (struct avs_tlv *)(payload + offset);
@@ -590,6 +597,9 @@ int avs_ipc_get_hw_config(struct avs_dev *adev, struct avs_hw_cfg *cfg)
exit:
/* No longer needed, free it as it's owned by the get_large_config() caller. */
kfree(payload);
+err:
+ if (ret)
+ dev_err(adev->dev, "get hw cfg failed: %d\n", ret);
return ret;
}
diff --git a/sound/soc/intel/avs/messages.h b/sound/soc/intel/avs/messages.h
index d0bdb7d9266c..0378633c7f96 100644
--- a/sound/soc/intel/avs/messages.h
+++ b/sound/soc/intel/avs/messages.h
@@ -859,8 +859,7 @@ static_assert(sizeof(struct avs_aec_cfg) == 92);
struct avs_asrc_cfg {
struct avs_modcfg_base base;
u32 out_freq;
- u32 rsvd0:1;
- u32 mode:1;
+ u32 mode:2;
u32 rsvd2:2;
u32 disable_jitter_buffer:1;
u32 rsvd3:27;
diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
index 945f9c0a6a54..4bfbcb5a5ae8 100644
--- a/sound/soc/intel/avs/pcm.c
+++ b/sound/soc/intel/avs/pcm.c
@@ -161,6 +161,7 @@ static int avs_dai_be_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dpcm *dpcm;
be = snd_soc_substream_to_rtd(substream);
+ /* dpcm_fe_dai_open() guarantees the list is not empty at this point. */
for_each_dpcm_fe(be, substream->stream, dpcm) {
fe = dpcm->fe;
fe_hw_params = &fe->dpcm[substream->stream].hw_params;
@@ -576,6 +577,7 @@ static int avs_dai_fe_hw_params(struct snd_pcm_substream *substream,
hdac_stream(host_stream)->format_val = 0;
fe = snd_soc_substream_to_rtd(substream);
+ /* dpcm_fe_dai_open() guarantees the list is not empty at this point. */
for_each_dpcm_be(fe, substream->stream, dpcm) {
be = dpcm->be;
be_hw_params = &be->dpcm[substream->stream].hw_params;
@@ -1564,6 +1566,7 @@ static int avs_component_hda_probe(struct snd_soc_component *component)
if (ret < 0) {
dev_err(component->dev, "create widgets failed: %d\n",
ret);
+ snd_soc_unregister_dai(dai);
goto exit;
}
}
@@ -1578,8 +1581,8 @@ exit:
static void avs_component_hda_remove(struct snd_soc_component *component)
{
- avs_component_hda_unregister_dais(component);
avs_component_remove(component);
+ avs_component_hda_unregister_dais(component);
}
static int avs_component_hda_open(struct snd_soc_component *component,
diff --git a/sound/soc/intel/avs/registers.h b/sound/soc/intel/avs/registers.h
index 5b6d60eb3c18..368ede05f2cd 100644
--- a/sound/soc/intel/avs/registers.h
+++ b/sound/soc/intel/avs/registers.h
@@ -76,7 +76,7 @@
/* Constants used when accessing SRAM, space shared with firmware */
#define AVS_FW_REG_BASE(adev) ((adev)->spec->sram->base_offset)
#define AVS_FW_REG_STATUS(adev) (AVS_FW_REG_BASE(adev) + 0x0)
-#define AVS_FW_REG_ERROR_CODE(adev) (AVS_FW_REG_BASE(adev) + 0x4)
+#define AVS_FW_REG_ERROR(adev) (AVS_FW_REG_BASE(adev) + 0x4)
#define AVS_WINDOW_CHUNK_SIZE SZ_4K
#define AVS_FW_REGS_SIZE AVS_WINDOW_CHUNK_SIZE
diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c
index 5cda527020c7..d612f20ed989 100644
--- a/sound/soc/intel/avs/topology.c
+++ b/sound/soc/intel/avs/topology.c
@@ -1466,7 +1466,7 @@ avs_tplg_path_template_create(struct snd_soc_component *comp, struct avs_tplg *o
static const struct avs_tplg_token_parser mod_init_config_parsers[] = {
{
- .token = AVS_TKN_MOD_INIT_CONFIG_ID_U32,
+ .token = AVS_TKN_INIT_CONFIG_ID_U32,
.type = SND_SOC_TPLG_TUPLE_TYPE_WORD,
.offset = offsetof(struct avs_tplg_init_config, id),
.parse = avs_parse_word_token,
@@ -1519,7 +1519,7 @@ static int avs_tplg_parse_initial_configs(struct snd_soc_component *comp,
esize = le32_to_cpu(tuples->size) + le32_to_cpu(tmp->size);
ret = parse_dictionary_entries(comp, tuples, esize, config, 1, sizeof(*config),
- AVS_TKN_MOD_INIT_CONFIG_ID_U32,
+ AVS_TKN_INIT_CONFIG_ID_U32,
mod_init_config_parsers,
ARRAY_SIZE(mod_init_config_parsers));
diff --git a/sound/soc/intel/avs/trace.h b/sound/soc/intel/avs/trace.h
index c9eaa5a60ed3..f4288d0ad5ef 100644
--- a/sound/soc/intel/avs/trace.h
+++ b/sound/soc/intel/avs/trace.h
@@ -37,60 +37,62 @@ TRACE_EVENT(avs_dsp_core_op,
void trace_avs_msg_payload(const void *data, size_t size);
-#define trace_avs_request(msg, fwregs) \
+#define trace_avs_request(msg, sts, lec) \
({ \
- trace_avs_ipc_request_msg((msg)->header, fwregs); \
+ trace_avs_ipc_request_msg((msg)->header, sts, lec); \
trace_avs_msg_payload((msg)->data, (msg)->size); \
})
-#define trace_avs_reply(msg, fwregs) \
+#define trace_avs_reply(msg, sts, lec) \
({ \
- trace_avs_ipc_reply_msg((msg)->header, fwregs); \
+ trace_avs_ipc_reply_msg((msg)->header, sts, lec); \
trace_avs_msg_payload((msg)->data, (msg)->size); \
})
-#define trace_avs_notify(msg, fwregs) \
+#define trace_avs_notify(msg, sts, lec) \
({ \
- trace_avs_ipc_notify_msg((msg)->header, fwregs); \
+ trace_avs_ipc_notify_msg((msg)->header, sts, lec); \
trace_avs_msg_payload((msg)->data, (msg)->size); \
})
#endif
DECLARE_EVENT_CLASS(avs_ipc_msg_hdr,
- TP_PROTO(u64 header, u64 fwregs),
+ TP_PROTO(u64 header, u32 sts, u32 lec),
- TP_ARGS(header, fwregs),
+ TP_ARGS(header, sts, lec),
TP_STRUCT__entry(
__field(u64, header)
- __field(u64, fwregs)
+ __field(u32, sts)
+ __field(u32, lec)
),
TP_fast_assign(
__entry->header = header;
- __entry->fwregs = fwregs;
+ __entry->sts = sts;
+ __entry->lec = lec;
),
TP_printk("primary: 0x%08X, extension: 0x%08X,\n"
- "fwstatus: 0x%08X, fwerror: 0x%08X",
+ "status: 0x%08X, error: 0x%08X",
lower_32_bits(__entry->header), upper_32_bits(__entry->header),
- lower_32_bits(__entry->fwregs), upper_32_bits(__entry->fwregs))
+ __entry->sts, __entry->lec)
);
DEFINE_EVENT(avs_ipc_msg_hdr, avs_ipc_request_msg,
- TP_PROTO(u64 header, u64 fwregs),
- TP_ARGS(header, fwregs)
+ TP_PROTO(u64 header, u32 sts, u32 lec),
+ TP_ARGS(header, sts, lec)
);
DEFINE_EVENT(avs_ipc_msg_hdr, avs_ipc_reply_msg,
- TP_PROTO(u64 header, u64 fwregs),
- TP_ARGS(header, fwregs)
+ TP_PROTO(u64 header, u32 sts, u32 lec),
+ TP_ARGS(header, sts, lec)
);
DEFINE_EVENT(avs_ipc_msg_hdr, avs_ipc_notify_msg,
- TP_PROTO(u64 header, u64 fwregs),
- TP_ARGS(header, fwregs)
+ TP_PROTO(u64 header, u32 sts, u32 lec),
+ TP_ARGS(header, sts, lec)
);
TRACE_EVENT_CONDITION(avs_ipc_msg_payload,
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index 22668bac74a1..0554c7e2cb34 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -124,8 +124,6 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
return ret;
card->dev = &pdev->dev;
- if (!snd_soc_acpi_sof_parent(&pdev->dev))
- card->disable_route_checks = true;
if (mach->mach_params.dmic_num > 0) {
card->components = devm_kasprintf(card->dev, GFP_KERNEL,
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index dc9b9f7c3a7d..b0d35fda7b17 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -22,6 +22,8 @@ static int quirk_override = -1;
module_param_named(quirk, quirk_override, int, 0444);
MODULE_PARM_DESC(quirk, "Board-specific quirk override");
+#define DMIC_DEFAULT_CHANNELS 2
+
static void log_quirks(struct device *dev)
{
if (SOC_SDW_JACK_JDSRC(sof_sdw_quirk))
@@ -42,6 +44,8 @@ static void log_quirks(struct device *dev)
dev_dbg(dev, "quirk SOC_SDW_CODEC_SPKR enabled\n");
if (sof_sdw_quirk & SOC_SDW_SIDECAR_AMPS)
dev_dbg(dev, "quirk SOC_SDW_SIDECAR_AMPS enabled\n");
+ if (sof_sdw_quirk & SOC_SDW_CODEC_MIC)
+ dev_dbg(dev, "quirk SOC_SDW_CODEC_MIC enabled\n");
}
static int sof_sdw_quirk_cb(const struct dmi_system_id *id)
@@ -639,9 +643,10 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
.callback = sof_sdw_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "380E")
+ DMI_MATCH(DMI_PRODUCT_NAME, "83HM")
},
- .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS),
+ .driver_data = (void *)(SOC_SDW_SIDECAR_AMPS |
+ SOC_SDW_CODEC_MIC),
},
{
.callback = sof_sdw_quirk_cb,
@@ -1142,22 +1147,24 @@ static int sof_card_dai_links_create(struct snd_soc_card *card)
hdmi_num = SOF_PRE_TGL_HDMI_COUNT;
/* enable dmic01 & dmic16k */
- if (sof_sdw_quirk & SOC_SDW_PCH_DMIC || mach_params->dmic_num) {
- if (ctx->ignore_internal_dmic)
- dev_warn(dev, "Ignoring PCH DMIC\n");
- else
- dmic_num = 2;
+ if (ctx->ignore_internal_dmic) {
+ dev_dbg(dev, "SoundWire DMIC is used, ignoring internal DMIC\n");
+ mach_params->dmic_num = 0;
+ } else if (mach_params->dmic_num) {
+ dmic_num = 2;
+ } else if (sof_sdw_quirk & SOC_SDW_PCH_DMIC) {
+ dmic_num = 2;
+ /*
+ * mach_params->dmic_num will be used to set the cfg-mics value of
+ * card->components string. Set it to the default value.
+ */
+ mach_params->dmic_num = DMIC_DEFAULT_CHANNELS;
}
- /*
- * mach_params->dmic_num will be used to set the cfg-mics value of card->components
- * string. Overwrite it to the actual number of PCH DMICs used in the device.
- */
- mach_params->dmic_num = dmic_num;
if (sof_sdw_quirk & SOF_SSP_BT_OFFLOAD_PRESENT)
bt_num = 1;
- dev_dbg(dev, "sdw %d, ssp %d, dmic %d, hdmi %d, bt: %d\n",
+ dev_dbg(dev, "DAI link numbers: sdw %d, ssp %d, dmic %d, hdmi %d, bt: %d\n",
sdw_be_num, ssp_num, dmic_num,
intel_ctx->hdmi.idisp_codec ? hdmi_num : 0, bt_num);
diff --git a/sound/soc/intel/common/soc-acpi-intel-arl-match.c b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
index 24d850df77ca..32147dc9d2d6 100644
--- a/sound/soc/intel/common/soc-acpi-intel-arl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-arl-match.c
@@ -138,7 +138,7 @@ static const struct snd_soc_acpi_adr_device cs35l56_2_r1_adr[] = {
},
};
-static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = {
+static const struct snd_soc_acpi_adr_device cs35l56_3_l3_adr[] = {
{
.adr = 0x00033301fa355601ull,
.num_endpoints = 1,
@@ -147,6 +147,24 @@ static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = {
},
};
+static const struct snd_soc_acpi_adr_device cs35l56_2_r3_adr[] = {
+ {
+ .adr = 0x00023301fa355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "AMP2"
+ },
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56_3_l1_adr[] = {
+ {
+ .adr = 0x00033101fa355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "AMP1"
+ },
+};
+
static const struct snd_soc_acpi_endpoint cs42l43_endpoints[] = {
{ /* Jack Playback Endpoint */
.num = 0,
@@ -306,6 +324,25 @@ static const struct snd_soc_acpi_link_adr arl_cs42l43_l0_cs35l56_2_l23[] = {
},
{
.mask = BIT(3),
+ .num_adr = ARRAY_SIZE(cs35l56_3_l3_adr),
+ .adr_d = cs35l56_3_l3_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr arl_cs42l43_l0_cs35l56_3_l23[] = {
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(cs42l43_0_adr),
+ .adr_d = cs42l43_0_adr,
+ },
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(cs35l56_2_r3_adr),
+ .adr_d = cs35l56_2_r3_adr,
+ },
+ {
+ .mask = BIT(3),
.num_adr = ARRAY_SIZE(cs35l56_3_l1_adr),
.adr_d = cs35l56_3_l1_adr,
},
@@ -407,6 +444,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[] = {
.sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l23.tplg",
},
{
+ .link_mask = BIT(0) | BIT(2) | BIT(3),
+ .links = arl_cs42l43_l0_cs35l56_3_l23,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-arl-cs42l43-l0-cs35l56-l23.tplg",
+ },
+ {
.link_mask = BIT(0) | BIT(2),
.links = arl_cs42l43_l0_cs35l56_l2,
.drv_name = "sof_sdw",
diff --git a/sound/soc/intel/common/soc-acpi-intel-lnl-match.c b/sound/soc/intel/common/soc-acpi-intel-lnl-match.c
index 98a9c36d7a4c..0b4a9c27c47e 100644
--- a/sound/soc/intel/common/soc-acpi-intel-lnl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-lnl-match.c
@@ -91,6 +91,23 @@ static const struct snd_soc_acpi_endpoint rt722_endpoints[] = {
},
};
+static const struct snd_soc_acpi_endpoint jack_dmic_endpoints[] = {
+ /* Jack Endpoint */
+ {
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ /* DMIC Endpoint */
+ {
+ .num = 1,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+};
+
static const struct snd_soc_acpi_endpoint jack_amp_g1_dmic_endpoints_endpoints[] = {
/* Jack Endpoint */
{
@@ -295,6 +312,24 @@ static const struct snd_soc_acpi_adr_device rt1320_1_group1_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt1320_1_group2_adr[] = {
+ {
+ .adr = 0x000130025D132001ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "rt1320-1"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1320_3_group2_adr[] = {
+ {
+ .adr = 0x000330025D132001ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "rt1320-2"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt713_0_adr[] = {
{
.adr = 0x000031025D071301ull,
@@ -304,6 +339,15 @@ static const struct snd_soc_acpi_adr_device rt713_0_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt713_vb_2_adr[] = {
+ {
+ .adr = 0x000230025d071301ull,
+ .num_endpoints = ARRAY_SIZE(jack_dmic_endpoints),
+ .endpoints = jack_dmic_endpoints,
+ .name_prefix = "rt713"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt714_0_adr[] = {
{
.adr = 0x000030025D071401ull,
@@ -453,6 +497,25 @@ static const struct snd_soc_acpi_link_adr lnl_sdw_rt713_l0_rt1318_l1[] = {
{}
};
+static const struct snd_soc_acpi_link_adr lnl_sdw_rt713_vb_l2_rt1320_l13[] = {
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt713_vb_2_adr),
+ .adr_d = rt713_vb_2_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt1320_1_group2_adr),
+ .adr_d = rt1320_1_group2_adr,
+ },
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(rt1320_3_group2_adr),
+ .adr_d = rt1320_3_group2_adr,
+ },
+ {}
+};
+
static const struct snd_soc_acpi_link_adr lnl_sdw_rt712_vb_l2_rt1320_l1[] = {
{
.mask = BIT(2),
@@ -550,6 +613,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[] = {
.machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb,
.sof_tplg_filename = "sof-lnl-rt712-l2-rt1320-l1.tplg"
},
+ {
+ .link_mask = BIT(1) | BIT(2) | BIT(3),
+ .links = lnl_sdw_rt713_vb_l2_rt1320_l13,
+ .drv_name = "sof_sdw",
+ .machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb,
+ .sof_tplg_filename = "sof-lnl-rt713-l2-rt1320-l13.tplg"
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_lnl_sdw_machines);
diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
index 03fc5a187012..770e2194a283 100644
--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
@@ -441,6 +441,179 @@ static const struct snd_soc_acpi_adr_device cs42l43_0_adr[] = {
}
};
+/* CS42L43 - speaker DAI aggregated with 4 amps */
+static const struct snd_soc_acpi_endpoint cs42l43_4amp_spkagg_endpoints[] = {
+ { /* Jack Playback Endpoint */
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ { /* DMIC Capture Endpoint */
+ .num = 1,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ { /* Jack Capture Endpoint */
+ .num = 2,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ { /* Speaker Playback Endpoint */
+ .num = 3,
+ .aggregated = 1,
+ .group_position = 4,
+ .group_id = 1,
+ },
+};
+
+/* CS42L43 on link3 aggregated with 4 amps */
+static const struct snd_soc_acpi_adr_device cs42l43_l3_4amp_spkagg_adr[] = {
+ {
+ .adr = 0x00033001FA424301ull,
+ .num_endpoints = ARRAY_SIZE(cs42l43_4amp_spkagg_endpoints),
+ .endpoints = cs42l43_4amp_spkagg_endpoints,
+ .name_prefix = "cs42l43"
+ }
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_l_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_r_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_2_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 2,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 2,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_3_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 3,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 3,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_4_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 4,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 4,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_5_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 5,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 5,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_6_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 6,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 6,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_7_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 7,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 7,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56_0_adr[] = {
+ {
+ .adr = 0x00003301FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "AMP1"
+ },
+ {
+ .adr = 0x00003201FA355601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_2_endpoint,
+ .name_prefix = "AMP2"
+ }
+};
+
static const struct snd_soc_acpi_adr_device cs35l56_1_adr[] = {
{
.adr = 0x00013701FA355601ull,
@@ -471,17 +644,71 @@ static const struct snd_soc_acpi_adr_device cs35l56_2_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device cs35l56_0_fb_adr[] = {
+ {
+ .adr = 0x00003301FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_l_fb_endpoints),
+ .endpoints = cs35l56_l_fb_endpoints,
+ .name_prefix = "AMP1"
+ },
+ {
+ .adr = 0x00003201FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_2_fb_endpoints),
+ .endpoints = cs35l56_2_fb_endpoints,
+ .name_prefix = "AMP2"
+ },
+ {
+ .adr = 0x00003101FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_4_fb_endpoints),
+ .endpoints = cs35l56_4_fb_endpoints,
+ .name_prefix = "AMP3"
+ },
+ {
+ .adr = 0x00003001FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_6_fb_endpoints),
+ .endpoints = cs35l56_6_fb_endpoints,
+ .name_prefix = "AMP4"
+ },
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56_1_fb_adr[] = {
+ {
+ .adr = 0x00013701FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_r_fb_endpoints),
+ .endpoints = cs35l56_r_fb_endpoints,
+ .name_prefix = "AMP8"
+ },
+ {
+ .adr = 0x00013601FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_3_fb_endpoints),
+ .endpoints = cs35l56_3_fb_endpoints,
+ .name_prefix = "AMP7"
+ },
+ {
+ .adr = 0x00013501FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_5_fb_endpoints),
+ .endpoints = cs35l56_5_fb_endpoints,
+ .name_prefix = "AMP6"
+ },
+ {
+ .adr = 0x00013401FA355601ull,
+ .num_endpoints = ARRAY_SIZE(cs35l56_7_fb_endpoints),
+ .endpoints = cs35l56_7_fb_endpoints,
+ .name_prefix = "AMP5"
+ },
+};
+
static const struct snd_soc_acpi_adr_device cs35l56_2_r_adr[] = {
{
.adr = 0x00023201FA355601ull,
- .num_endpoints = 1,
- .endpoints = &spk_r_endpoint,
+ .num_endpoints = ARRAY_SIZE(cs35l56_r_fb_endpoints),
+ .endpoints = cs35l56_r_fb_endpoints,
.name_prefix = "AMP3"
},
{
.adr = 0x00023301FA355601ull,
- .num_endpoints = 1,
- .endpoints = &spk_3_endpoint,
+ .num_endpoints = ARRAY_SIZE(cs35l56_3_fb_endpoints),
+ .endpoints = cs35l56_3_fb_endpoints,
.name_prefix = "AMP4"
}
@@ -490,14 +717,14 @@ static const struct snd_soc_acpi_adr_device cs35l56_2_r_adr[] = {
static const struct snd_soc_acpi_adr_device cs35l56_3_l_adr[] = {
{
.adr = 0x00033001fa355601ull,
- .num_endpoints = 1,
- .endpoints = &spk_l_endpoint,
+ .num_endpoints = ARRAY_SIZE(cs35l56_l_fb_endpoints),
+ .endpoints = cs35l56_l_fb_endpoints,
.name_prefix = "AMP1"
},
{
.adr = 0x00033101fa355601ull,
- .num_endpoints = 1,
- .endpoints = &spk_2_endpoint,
+ .num_endpoints = ARRAY_SIZE(cs35l56_2_fb_endpoints),
+ .endpoints = cs35l56_2_fb_endpoints,
.name_prefix = "AMP2"
}
};
@@ -765,6 +992,40 @@ static const struct snd_soc_acpi_link_adr cs42l43_link0_cs35l56_link2_link3[] =
{}
};
+static const struct snd_soc_acpi_link_adr cs42l43_link3_cs35l56_x4_link0_link1_spkagg[] = {
+ /* Expected order: jack -> amp */
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(cs42l43_l3_4amp_spkagg_adr),
+ .adr_d = cs42l43_l3_4amp_spkagg_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = 2,
+ .adr_d = cs35l56_1_adr,
+ },
+ {
+ .mask = BIT(0),
+ .num_adr = 2,
+ .adr_d = cs35l56_0_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr mtl_cs35l56_x8_link0_link1_fb[] = {
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(cs35l56_1_fb_adr),
+ .adr_d = cs35l56_1_fb_adr,
+ },
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(cs35l56_0_fb_adr),
+ .adr_d = cs35l56_0_fb_adr,
+ },
+ {}
+};
+
/* this table is used when there is no I2S codec present */
struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[] = {
/* mockup tests need to be first */
@@ -842,12 +1103,24 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[] = {
.sof_tplg_filename = "sof-mtl-cs42l43-l0-cs35l56-l23.tplg",
},
{
+ .link_mask = BIT(0) | BIT(1) | BIT(3),
+ .links = cs42l43_link3_cs35l56_x4_link0_link1_spkagg,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-mtl-cs42l43-l3-cs35l56-l01-spkagg.tplg",
+ },
+ {
.link_mask = GENMASK(2, 0),
.links = mtl_cs42l43_cs35l56,
.drv_name = "sof_sdw",
.sof_tplg_filename = "sof-mtl-cs42l43-l0-cs35l56-l12.tplg",
},
{
+ .link_mask = BIT(0) | BIT(1),
+ .links = mtl_cs35l56_x8_link0_link1_fb,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-mtl-cs35l56-l01-fb8.tplg"
+ },
+ {
.link_mask = BIT(0),
.links = mtl_cs42l43_l0,
.drv_name = "sof_sdw",
diff --git a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
index f1c0d7a02cda..9eb4a43e3e7a 100644
--- a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c
@@ -8,6 +8,7 @@
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
+#include "soc-acpi-intel-sdca-quirks.h"
#include "soc-acpi-intel-sdw-mockup-match.h"
#include <sound/soc-acpi-intel-ssp-common.h>
@@ -35,6 +36,20 @@ static const struct snd_soc_acpi_endpoint single_endpoint = {
.group_id = 0,
};
+static const struct snd_soc_acpi_endpoint spk_l_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
+};
+
+static const struct snd_soc_acpi_endpoint spk_r_endpoint = {
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 1,
+};
+
/*
* Multi-function codecs with three endpoints created for
* headset, amp and dmic functions.
@@ -60,6 +75,47 @@ static const struct snd_soc_acpi_endpoint rt_mf_endpoints[] = {
},
};
+static const struct snd_soc_acpi_endpoint jack_dmic_endpoints[] = {
+ /* Jack Endpoint */
+ {
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ /* DMIC Endpoint */
+ {
+ .num = 1,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint jack_amp_g1_dmic_endpoints_endpoints[] = {
+ /* Jack Endpoint */
+ {
+ .num = 0,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+ /* Amp Endpoint, work as spk_l_endpoint */
+ {
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
+ },
+ /* DMIC Endpoint */
+ {
+ .num = 2,
+ .aggregated = 0,
+ .group_position = 0,
+ .group_id = 0,
+ },
+};
+
static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
{
.adr = 0x000030025D071101ull,
@@ -69,6 +125,24 @@ static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt712_vb_2_group1_adr[] = {
+ {
+ .adr = 0x000230025D071201ull,
+ .num_endpoints = ARRAY_SIZE(jack_amp_g1_dmic_endpoints_endpoints),
+ .endpoints = jack_amp_g1_dmic_endpoints_endpoints,
+ .name_prefix = "rt712"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt713_vb_2_adr[] = {
+ {
+ .adr = 0x000230025d071301ull,
+ .num_endpoints = ARRAY_SIZE(jack_dmic_endpoints),
+ .endpoints = jack_dmic_endpoints,
+ .name_prefix = "rt713"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt721_3_single_adr[] = {
{
.adr = 0x000330025d072101ull,
@@ -114,6 +188,33 @@ static const struct snd_soc_acpi_adr_device rt722_3_single_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt1320_1_group1_adr[] = {
+ {
+ .adr = 0x000130025D132001ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "rt1320-1"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1320_1_group2_adr[] = {
+ {
+ .adr = 0x000130025D132001ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "rt1320-1"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1320_3_group2_adr[] = {
+ {
+ .adr = 0x000330025D132001ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "rt1320-2"
+ }
+};
+
static const struct snd_soc_acpi_link_adr ptl_rt722_only[] = {
{
.mask = BIT(0),
@@ -150,6 +251,39 @@ static const struct snd_soc_acpi_link_adr ptl_rvp[] = {
{}
};
+static const struct snd_soc_acpi_link_adr lnl_sdw_rt713_vb_l2_rt1320_l13[] = {
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt713_vb_2_adr),
+ .adr_d = rt713_vb_2_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt1320_1_group2_adr),
+ .adr_d = rt1320_1_group2_adr,
+ },
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(rt1320_3_group2_adr),
+ .adr_d = rt1320_3_group2_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr lnl_sdw_rt712_vb_l2_rt1320_l1[] = {
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt712_vb_2_group1_adr),
+ .adr_d = rt712_vb_2_group1_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt1320_1_group1_adr),
+ .adr_d = rt1320_1_group1_adr,
+ },
+ {}
+};
+
/* this table is used when there is no I2S codec present */
struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_sdw_machines[] = {
/* mockup tests need to be first */
@@ -201,6 +335,20 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_ptl_sdw_machines[] = {
.drv_name = "sof_sdw",
.sof_tplg_filename = "sof-ptl-rt722.tplg",
},
+ {
+ .link_mask = BIT(1) | BIT(2),
+ .links = lnl_sdw_rt712_vb_l2_rt1320_l1,
+ .drv_name = "sof_sdw",
+ .machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb,
+ .sof_tplg_filename = "sof-lnl-rt712-l2-rt1320-l1.tplg"
+ },
+ {
+ .link_mask = BIT(1) | BIT(2) | BIT(3),
+ .links = lnl_sdw_rt713_vb_l2_rt1320_l13,
+ .drv_name = "sof_sdw",
+ .machine_check = snd_soc_acpi_intel_sdca_is_device_rt712_vb,
+ .sof_tplg_filename = "sof-lnl-rt713-l2-rt1320-l13.tplg"
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_ptl_sdw_machines);
diff --git a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
index 161ba532d270..6f8c06413665 100644
--- a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
@@ -536,6 +536,194 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[] = {
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_tgl_machines);
+static const struct snd_soc_acpi_endpoint cs35l56_l_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 0,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_r_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 1,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_2_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 2,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 2,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_3_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 3,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 3,
+ .group_id = 2,
+ },
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_4_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 4,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 4,
+ .group_id = 2,
+ }
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_5_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 5,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 5,
+ .group_id = 2,
+ }
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_6_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 6,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 6,
+ .group_id = 2,
+ }
+};
+
+static const struct snd_soc_acpi_endpoint cs35l56_7_fb_endpoints[] = {
+ { /* Speaker Playback Endpoint */
+ .num = 0,
+ .aggregated = 1,
+ .group_position = 7,
+ .group_id = 1,
+ },
+ { /* Feedback Capture Endpoint */
+ .num = 1,
+ .aggregated = 1,
+ .group_position = 7,
+ .group_id = 2,
+ }
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56_sdw_eight_1_4_fb_adr[] = {
+ {
+ .adr = 0x00003301fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_l_fb_endpoints),
+ .endpoints = cs35l56_l_fb_endpoints,
+ .name_prefix = "AMP1"
+ },
+ {
+ .adr = 0x00003201fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_2_fb_endpoints),
+ .endpoints = cs35l56_2_fb_endpoints,
+ .name_prefix = "AMP2"
+ },
+ {
+ .adr = 0x00003101fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_4_fb_endpoints),
+ .endpoints = cs35l56_4_fb_endpoints,
+ .name_prefix = "AMP3"
+ },
+ {
+ .adr = 0x00003001fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_6_fb_endpoints),
+ .endpoints = cs35l56_6_fb_endpoints,
+ .name_prefix = "AMP4"
+ },
+};
+
+static const struct snd_soc_acpi_adr_device cs35l56_sdw_eight_5_8_fb_adr[] = {
+ {
+ .adr = 0x00013701fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_r_fb_endpoints),
+ .endpoints = cs35l56_r_fb_endpoints,
+ .name_prefix = "AMP8"
+ },
+ {
+ .adr = 0x00013601fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_3_fb_endpoints),
+ .endpoints = cs35l56_3_fb_endpoints,
+ .name_prefix = "AMP7"
+ },
+ {
+ .adr = 0x00013501fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_5_fb_endpoints),
+ .endpoints = cs35l56_5_fb_endpoints,
+ .name_prefix = "AMP6"
+ },
+ {
+ .adr = 0x00013401fa355601,
+ .num_endpoints = ARRAY_SIZE(cs35l56_7_fb_endpoints),
+ .endpoints = cs35l56_7_fb_endpoints,
+ .name_prefix = "AMP5"
+ },
+};
+
+static const struct snd_soc_acpi_link_adr up_extreme_cs35l56_sdw_eight[] = {
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(cs35l56_sdw_eight_5_8_fb_adr),
+ .adr_d = cs35l56_sdw_eight_5_8_fb_adr,
+ },
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(cs35l56_sdw_eight_1_4_fb_adr),
+ .adr_d = cs35l56_sdw_eight_1_4_fb_adr,
+ },
+ {}
+};
+
/* this table is used when there is no I2S codec present */
struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[] = {
/* mockup tests need to be first */
@@ -635,6 +823,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[] = {
.drv_name = "sof_sdw",
.sof_tplg_filename = "sof-tgl-rt711.tplg",
},
+ {
+ .link_mask = BIT(0) | BIT(1),
+ .links = up_extreme_cs35l56_sdw_eight,
+ .drv_name = "sof_sdw",
+ .sof_tplg_filename = "sof-tgl-cs35l56-l01-fb8.tplg"
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_tgl_sdw_machines);
diff --git a/sound/soc/intel/keembay/kmb_platform.c b/sound/soc/intel/keembay/kmb_platform.c
index aa5de167e790..4ed71d11ad77 100644
--- a/sound/soc/intel/keembay/kmb_platform.c
+++ b/sound/soc/intel/keembay/kmb_platform.c
@@ -869,7 +869,7 @@ static int kmb_plat_dai_probe(struct platform_device *pdev)
kmb_i2s->fifo_th = (1 << COMP1_FIFO_DEPTH(comp1_reg)) / 2;
- kmb_i2s->use_pio = !(of_property_read_bool(np, "dmas"));
+ kmb_i2s->use_pio = !of_property_present(np, "dmas");
if (kmb_i2s->use_pio) {
irq = platform_get_irq_optional(pdev, 0);
diff --git a/sound/soc/mediatek/common/mtk-soundcard-driver.c b/sound/soc/mediatek/common/mtk-soundcard-driver.c
index 3bbf42c42805..f4314dddc460 100644
--- a/sound/soc/mediatek/common/mtk-soundcard-driver.c
+++ b/sound/soc/mediatek/common/mtk-soundcard-driver.c
@@ -221,7 +221,7 @@ int mtk_soundcard_common_probe(struct platform_device *pdev)
card->name = pdata->card_name;
}
- needs_legacy_probe = !of_property_read_bool(pdev->dev.of_node, "audio-routing");
+ needs_legacy_probe = !of_property_present(pdev->dev.of_node, "audio-routing");
if (needs_legacy_probe) {
/*
* If we have no .soc_probe() callback there's no way of using
@@ -262,7 +262,7 @@ int mtk_soundcard_common_probe(struct platform_device *pdev)
adsp_node = NULL;
if (adsp_node) {
- if (of_property_read_bool(pdev->dev.of_node, "mediatek,dai-link")) {
+ if (of_property_present(pdev->dev.of_node, "mediatek,dai-link")) {
ret = mtk_sof_dailink_parse_of(card, pdev->dev.of_node,
"mediatek,dai-link",
card->dai_link, card->num_links);
diff --git a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
index 9b502f4cd6ea..80cda7bf5ccc 100644
--- a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
+++ b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
@@ -2158,27 +2158,26 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
{
struct mtk_base_afe *afe;
struct mt8192_afe_private *afe_priv;
- struct device *dev;
+ struct device *dev = &pdev->dev;
struct reset_control *rstc;
int i, ret, irq_id;
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(34));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(34));
if (ret)
return ret;
- afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
+ afe = devm_kzalloc(dev, sizeof(*afe), GFP_KERNEL);
if (!afe)
return -ENOMEM;
platform_set_drvdata(pdev, afe);
- afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
+ afe->platform_priv = devm_kzalloc(dev, sizeof(*afe_priv),
GFP_KERNEL);
if (!afe->platform_priv)
return -ENOMEM;
afe_priv = afe->platform_priv;
- afe->dev = &pdev->dev;
- dev = afe->dev;
+ afe->dev = dev;
/* init audio related clock */
ret = mt8192_init_clock(afe);
@@ -2196,7 +2195,7 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to trigger audio reset\n");
- ret = devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
@@ -2212,13 +2211,13 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
/* enable clock for regcache get default value from hw */
afe_priv->pm_runtime_bypass_reg_ctl = true;
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_sync(dev);
ret = regmap_reinit_cache(afe->regmap, &mt8192_afe_regmap_config);
if (ret)
return dev_err_probe(dev, ret, "regmap_reinit_cache fail\n");
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync(dev);
afe_priv->pm_runtime_bypass_reg_ctl = false;
regcache_cache_only(afe->regmap, true);
@@ -2285,7 +2284,7 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
afe->runtime_suspend = mt8192_afe_runtime_suspend;
/* register platform */
- ret = devm_snd_soc_register_component(&pdev->dev,
+ ret = devm_snd_soc_register_component(dev,
&mtk_afe_pcm_platform,
afe->dai_drivers,
afe->num_dai_drivers);
diff --git a/sound/soc/mediatek/mt8365/Makefile b/sound/soc/mediatek/mt8365/Makefile
index 52ba45a8498a..b197025e34bb 100644
--- a/sound/soc/mediatek/mt8365/Makefile
+++ b/sound/soc/mediatek/mt8365/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# MTK Platform driver
-snd-soc-mt8365-pcm-objs := \
+snd-soc-mt8365-pcm-y := \
mt8365-afe-clk.o \
mt8365-afe-pcm.o \
mt8365-dai-adda.o \
diff --git a/sound/soc/mediatek/mt8365/mt8365-mt6357.c b/sound/soc/mediatek/mt8365/mt8365-mt6357.c
index d398e83ea052..9f28d6bf0323 100644
--- a/sound/soc/mediatek/mt8365/mt8365-mt6357.c
+++ b/sound/soc/mediatek/mt8365/mt8365-mt6357.c
@@ -6,12 +6,19 @@
* Authors: Nicolas Belin <nbelin@baylibre.com>
*/
+#include <linux/array_size.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
#include <sound/soc.h>
#include <sound/pcm_params.h>
+
#include "mt8365-afe-common.h"
-#include <linux/pinctrl/consumer.h>
#include "../common/mtk-soc-card.h"
#include "../common/mtk-soundcard-driver.h"
diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
index 928cf5cb5999..7ee60a58a336 100644
--- a/sound/soc/qcom/common.c
+++ b/sound/soc/qcom/common.c
@@ -44,20 +44,20 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
return ret;
}
- if (of_property_read_bool(dev->of_node, "widgets")) {
+ if (of_property_present(dev->of_node, "widgets")) {
ret = snd_soc_of_parse_audio_simple_widgets(card, "widgets");
if (ret)
return ret;
}
/* DAPM routes */
- if (of_property_read_bool(dev->of_node, "audio-routing")) {
+ if (of_property_present(dev->of_node, "audio-routing")) {
ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
if (ret)
return ret;
}
/* Deprecated, only for compatibility with old device trees */
- if (of_property_read_bool(dev->of_node, "qcom,audio-routing")) {
+ if (of_property_present(dev->of_node, "qcom,audio-routing")) {
ret = snd_soc_of_parse_audio_routing(card, "qcom,audio-routing");
if (ret)
return ret;
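
The of_property_read_bool() -> of_property_present() substitutions across this series are semantic clean-ups: both return whether the property exists, but of_property_read_bool() is intended for flag properties, while "widgets", "audio-routing" and similar properties carry values and should only be tested for presence. A minimal sketch of the distinction (property usage is hypothetical):

#include <linux/of.h>

/* Flag vs. value-carrying properties: present-as-true for the former,
 * presence check before parsing for the latter.
 */
static void example_parse(struct device_node *np, bool *wakeup, bool *has_routing)
{
	*wakeup = of_property_read_bool(np, "wakeup-source");
	*has_routing = of_property_present(np, "audio-routing");
}
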
diff --git a/sound/soc/qcom/sc7180.c b/sound/soc/qcom/sc7180.c
index bc030ce29680..d95710b1ea4e 100644
--- a/sound/soc/qcom/sc7180.c
+++ b/sound/soc/qcom/sc7180.c
@@ -513,7 +513,7 @@ static int sc7180_snd_platform_probe(struct platform_device *pdev)
card->controls = sc7180_snd_controls;
card->num_controls = ARRAY_SIZE(sc7180_snd_controls);
- if (of_property_read_bool(dev->of_node, "dmic-gpios")) {
+ if (of_property_present(dev->of_node, "dmic-gpios")) {
card->dapm_widgets = sc7180_snd_dual_mic_widgets,
card->num_dapm_widgets = ARRAY_SIZE(sc7180_snd_dual_mic_widgets),
card->controls = sc7180_snd_dual_mic_controls,
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index a479d7e5b7fb..fcc7df75346f 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -215,6 +215,7 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
ret = sdm845_slim_snd_hw_params(substream, params);
break;
case QUATERNARY_MI2S_RX:
+ case SECONDARY_MI2S_RX:
break;
default:
pr_err("%s: invalid dai id 0x%x\n", __func__, cpu_dai->id);
@@ -356,6 +357,7 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
snd_soc_dai_set_fmt(codec_dai, codec_dai_fmt);
break;
+ case SECONDARY_MI2S_RX:
case SECONDARY_MI2S_TX:
codec_dai_fmt |= SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_I2S;
if (++(data->sec_mi2s_clk_count) == 1) {
@@ -371,8 +373,6 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
Q6AFE_LPASS_CLK_ID_QUAD_MI2S_IBIT,
MI2S_BCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
snd_soc_dai_set_fmt(cpu_dai, fmt);
-
-
break;
case QUATERNARY_TDM_RX_0:
@@ -441,6 +441,7 @@ static void sdm845_snd_shutdown(struct snd_pcm_substream *substream)
}
break;
+ case SECONDARY_MI2S_RX:
case SECONDARY_MI2S_TX:
if (--(data->sec_mi2s_clk_count) == 0) {
snd_soc_dai_set_sysclk(cpu_dai,
diff --git a/sound/soc/renesas/rz-ssi.c b/sound/soc/renesas/rz-ssi.c
index 6efd017aaa7f..3a0af4ca7ab6 100644
--- a/sound/soc/renesas/rz-ssi.c
+++ b/sound/soc/renesas/rz-ssi.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -71,7 +72,7 @@
#define PREALLOC_BUFFER (SZ_32K)
#define PREALLOC_BUFFER_MAX (SZ_32K)
-#define SSI_RATES SNDRV_PCM_RATE_8000_48000 /* 8k-44.1kHz */
+#define SSI_RATES SNDRV_PCM_RATE_8000_48000 /* 8k-48kHz */
#define SSI_FMTS SNDRV_PCM_FMTBIT_S16_LE
#define SSI_CHAN_MIN 2
#define SSI_CHAN_MAX 2
@@ -99,7 +100,6 @@ struct rz_ssi_stream {
struct rz_ssi_priv {
void __iomem *base;
- struct platform_device *pdev;
struct reset_control *rstc;
struct device *dev;
struct clk *sfr_clk;
@@ -163,16 +163,7 @@ static void rz_ssi_reg_mask_setl(struct rz_ssi_priv *priv, uint reg,
writel(val, (priv->base + reg));
}
-static inline struct snd_soc_dai *
-rz_ssi_get_dai(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
-
- return snd_soc_rtd_to_cpu(rtd, 0);
-}
-
-static inline bool rz_ssi_stream_is_play(struct rz_ssi_priv *ssi,
- struct snd_pcm_substream *substream)
+static inline bool rz_ssi_stream_is_play(struct snd_pcm_substream *substream)
{
return substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
}
@@ -244,22 +235,21 @@ static void rz_ssi_stream_init(struct rz_ssi_stream *strm,
static void rz_ssi_stream_quit(struct rz_ssi_priv *ssi,
struct rz_ssi_stream *strm)
{
- struct snd_soc_dai *dai = rz_ssi_get_dai(strm->substream);
+ struct device *dev = ssi->dev;
rz_ssi_set_substream(strm, NULL);
if (strm->oerr_num > 0)
- dev_info(dai->dev, "overrun = %d\n", strm->oerr_num);
+ dev_info(dev, "overrun = %d\n", strm->oerr_num);
if (strm->uerr_num > 0)
- dev_info(dai->dev, "underrun = %d\n", strm->uerr_num);
+ dev_info(dev, "underrun = %d\n", strm->uerr_num);
}
static int rz_ssi_clk_setup(struct rz_ssi_priv *ssi, unsigned int rate,
unsigned int channels)
{
- static s8 ckdv[16] = { 1, 2, 4, 8, 16, 32, 64, 128,
- 6, 12, 24, 48, 96, -1, -1, -1 };
+ static u8 ckdv[] = { 1, 2, 4, 8, 16, 32, 64, 128, 6, 12, 24, 48, 96 };
unsigned int channel_bits = 32; /* System Word Length */
unsigned long bclk_rate = rate * channels * channel_bits;
unsigned int div;
@@ -318,7 +308,8 @@ static int rz_ssi_clk_setup(struct rz_ssi_priv *ssi, unsigned int rate,
static void rz_ssi_set_idle(struct rz_ssi_priv *ssi)
{
- int timeout;
+ u32 tmp;
+ int ret;
/* Disable irqs */
rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TUIEN | SSICR_TOIEN |
@@ -331,15 +322,9 @@ static void rz_ssi_set_idle(struct rz_ssi_priv *ssi)
SSISR_RUIRQ), 0);
/* Wait for idle */
- timeout = 100;
- while (--timeout) {
- if (rz_ssi_reg_readl(ssi, SSISR) & SSISR_IIRQ)
- break;
- udelay(1);
- }
-
- if (!timeout)
- dev_info(ssi->dev, "timeout waiting for SSI idle\n");
+ ret = readl_poll_timeout_atomic(ssi->base + SSISR, tmp, (tmp & SSISR_IIRQ), 1, 100);
+ if (ret)
+ dev_warn_ratelimited(ssi->dev, "timeout waiting for SSI idle\n");
/* Hold FIFOs in reset */
rz_ssi_reg_mask_setl(ssi, SSIFCR, 0, SSIFCR_FIFO_RST);
@@ -347,7 +332,7 @@ static void rz_ssi_set_idle(struct rz_ssi_priv *ssi)
static int rz_ssi_start(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
{
- bool is_play = rz_ssi_stream_is_play(ssi, strm->substream);
+ bool is_play = rz_ssi_stream_is_play(strm->substream);
bool is_full_duplex;
u32 ssicr, ssifcr;
@@ -403,6 +388,15 @@ static int rz_ssi_start(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
return 0;
}
+static int rz_ssi_swreset(struct rz_ssi_priv *ssi)
+{
+ u32 tmp;
+
+ rz_ssi_reg_mask_setl(ssi, SSIFCR, 0, SSIFCR_SSIRST);
+ rz_ssi_reg_mask_setl(ssi, SSIFCR, SSIFCR_SSIRST, 0);
+ return readl_poll_timeout_atomic(ssi->base + SSIFCR, tmp, !(tmp & SSIFCR_SSIRST), 1, 5);
+}
+
static int rz_ssi_stop(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
{
strm->running = 0;
@@ -415,8 +409,12 @@ static int rz_ssi_stop(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TEN | SSICR_REN, 0);
/* Cancel all remaining DMA transactions */
- if (rz_ssi_is_dma_enabled(ssi))
- dmaengine_terminate_async(strm->dma_ch);
+ if (rz_ssi_is_dma_enabled(ssi)) {
+ if (ssi->playback.dma_ch)
+ dmaengine_terminate_async(ssi->playback.dma_ch);
+ if (ssi->capture.dma_ch)
+ dmaengine_terminate_async(ssi->capture.dma_ch);
+ }
rz_ssi_set_idle(ssi);
@@ -523,6 +521,8 @@ static int rz_ssi_pio_send(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
sample_space = strm->fifo_sample_size;
ssifsr = rz_ssi_reg_readl(ssi, SSIFSR);
sample_space -= (ssifsr >> SSIFSR_TDC_SHIFT) & SSIFSR_TDC_MASK;
+ if (sample_space < 0)
+ return -EINVAL;
/* Only add full frames at a time */
while (frames_left && (sample_space >= runtime->channels)) {
@@ -680,7 +680,7 @@ static int rz_ssi_dma_transfer(struct rz_ssi_priv *ssi,
*/
return 0;
- dir = rz_ssi_stream_is_play(ssi, substream) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ dir = rz_ssi_stream_is_play(substream) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
/* Always transfer 1 period */
amount = runtime->period_size;
@@ -784,6 +784,32 @@ no_dma:
return -ENODEV;
}
+static int rz_ssi_trigger_resume(struct rz_ssi_priv *ssi)
+{
+ int ret;
+
+ if (rz_ssi_is_stream_running(&ssi->playback) ||
+ rz_ssi_is_stream_running(&ssi->capture))
+ return 0;
+
+ ret = rz_ssi_swreset(ssi);
+ if (ret)
+ return ret;
+
+ return rz_ssi_clk_setup(ssi, ssi->hw_params_cache.rate,
+ ssi->hw_params_cache.channels);
+}
+
+static void rz_ssi_streams_suspend(struct rz_ssi_priv *ssi)
+{
+ if (rz_ssi_is_stream_running(&ssi->playback) ||
+ rz_ssi_is_stream_running(&ssi->capture))
+ return;
+
+ ssi->playback.dma_buffer_pos = 0;
+ ssi->capture.dma_buffer_pos = 0;
+}
+
static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
@@ -792,21 +818,21 @@ static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
int ret = 0, i, num_transfer = 1;
switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- /* Soft Reset */
- if (!rz_ssi_is_stream_running(&ssi->playback) &&
- !rz_ssi_is_stream_running(&ssi->capture)) {
- rz_ssi_reg_mask_setl(ssi, SSIFCR, 0, SSIFCR_SSIRST);
- rz_ssi_reg_mask_setl(ssi, SSIFCR, SSIFCR_SSIRST, 0);
- udelay(5);
- }
+ case SNDRV_PCM_TRIGGER_RESUME:
+ ret = rz_ssi_trigger_resume(ssi);
+ if (ret)
+ return ret;
- rz_ssi_stream_init(strm, substream);
+ fallthrough;
+
+ case SNDRV_PCM_TRIGGER_START:
+ if (cmd == SNDRV_PCM_TRIGGER_START)
+ rz_ssi_stream_init(strm, substream);
if (ssi->dma_rt) {
bool is_playback;
- is_playback = rz_ssi_stream_is_play(ssi, substream);
+ is_playback = rz_ssi_stream_is_play(substream);
ret = rz_ssi_dma_slave_config(ssi, ssi->playback.dma_ch,
is_playback);
/* Fallback to pio */
@@ -829,6 +855,12 @@ static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
ret = rz_ssi_start(ssi, strm);
break;
+
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ rz_ssi_stop(ssi, strm);
+ rz_ssi_streams_suspend(ssi);
+ break;
+
case SNDRV_PCM_TRIGGER_STOP:
rz_ssi_stop(ssi, strm);
rz_ssi_stream_quit(ssi, strm);
@@ -925,6 +957,7 @@ static int rz_ssi_dai_hw_params(struct snd_pcm_substream *substream,
SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min;
unsigned int channels = params_channels(params);
unsigned int rate = params_rate(params);
+ int ret;
if (sample_bits != 16) {
dev_err(ssi->dev, "Unsupported sample width: %d\n",
@@ -951,6 +984,10 @@ static int rz_ssi_dai_hw_params(struct snd_pcm_substream *substream,
rz_ssi_cache_hw_params(ssi, rate, channels, strm->sample_width,
sample_bits);
+ ret = rz_ssi_swreset(ssi);
+ if (ret)
+ return ret;
+
return rz_ssi_clk_setup(ssi, rate, channels);
}
@@ -963,7 +1000,8 @@ static const struct snd_soc_dai_ops rz_ssi_dai_ops = {
static const struct snd_pcm_hardware rz_ssi_pcm_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID,
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_RESUME,
.buffer_bytes_max = PREALLOC_BUFFER,
.period_bytes_min = 32,
.period_bytes_max = 8192,
@@ -986,7 +1024,8 @@ static int rz_ssi_pcm_open(struct snd_soc_component *component,
static snd_pcm_uframes_t rz_ssi_pcm_pointer(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
- struct snd_soc_dai *dai = rz_ssi_get_dai(substream);
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
+ struct snd_soc_dai *dai = snd_soc_rtd_to_cpu(rtd, 0);
struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai);
struct rz_ssi_stream *strm = rz_ssi_stream_get(ssi, substream);
@@ -1031,37 +1070,37 @@ static const struct snd_soc_component_driver rz_ssi_soc_component = {
static int rz_ssi_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rz_ssi_priv *ssi;
struct clk *audio_clk;
struct resource *res;
int ret;
- ssi = devm_kzalloc(&pdev->dev, sizeof(*ssi), GFP_KERNEL);
+ ssi = devm_kzalloc(dev, sizeof(*ssi), GFP_KERNEL);
if (!ssi)
return -ENOMEM;
- ssi->pdev = pdev;
- ssi->dev = &pdev->dev;
+ ssi->dev = dev;
ssi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ssi->base))
return PTR_ERR(ssi->base);
ssi->phys = res->start;
- ssi->clk = devm_clk_get(&pdev->dev, "ssi");
+ ssi->clk = devm_clk_get(dev, "ssi");
if (IS_ERR(ssi->clk))
return PTR_ERR(ssi->clk);
- ssi->sfr_clk = devm_clk_get(&pdev->dev, "ssi_sfr");
+ ssi->sfr_clk = devm_clk_get(dev, "ssi_sfr");
if (IS_ERR(ssi->sfr_clk))
return PTR_ERR(ssi->sfr_clk);
- audio_clk = devm_clk_get(&pdev->dev, "audio_clk1");
+ audio_clk = devm_clk_get(dev, "audio_clk1");
if (IS_ERR(audio_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(audio_clk),
"no audio clk1");
ssi->audio_clk_1 = clk_get_rate(audio_clk);
- audio_clk = devm_clk_get(&pdev->dev, "audio_clk2");
+ audio_clk = devm_clk_get(dev, "audio_clk2");
if (IS_ERR(audio_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(audio_clk),
"no audio clk2");
@@ -1074,13 +1113,13 @@ static int rz_ssi_probe(struct platform_device *pdev)
ssi->audio_mck = ssi->audio_clk_1 ? ssi->audio_clk_1 : ssi->audio_clk_2;
/* Detect DMA support */
- ret = rz_ssi_dma_request(ssi, &pdev->dev);
+ ret = rz_ssi_dma_request(ssi, dev);
if (ret < 0) {
- dev_warn(&pdev->dev, "DMA not available, using PIO\n");
+ dev_warn(dev, "DMA not available, using PIO\n");
ssi->playback.transfer = rz_ssi_pio_send;
ssi->capture.transfer = rz_ssi_pio_recv;
} else {
- dev_info(&pdev->dev, "DMA enabled");
+ dev_info(dev, "DMA enabled");
ssi->playback.transfer = rz_ssi_dma_transfer;
ssi->capture.transfer = rz_ssi_dma_transfer;
}
@@ -1089,21 +1128,20 @@ static int rz_ssi_probe(struct platform_device *pdev)
ssi->capture.priv = ssi;
spin_lock_init(&ssi->lock);
- dev_set_drvdata(&pdev->dev, ssi);
+ dev_set_drvdata(dev, ssi);
/* Error Interrupt */
ssi->irq_int = platform_get_irq_byname(pdev, "int_req");
if (ssi->irq_int < 0) {
- rz_ssi_release_dma_channels(ssi);
- return ssi->irq_int;
+ ret = ssi->irq_int;
+ goto err_release_dma_chs;
}
- ret = devm_request_irq(&pdev->dev, ssi->irq_int, &rz_ssi_interrupt,
- 0, dev_name(&pdev->dev), ssi);
+ ret = devm_request_irq(dev, ssi->irq_int, &rz_ssi_interrupt,
+ 0, dev_name(dev), ssi);
if (ret < 0) {
- rz_ssi_release_dma_channels(ssi);
- return dev_err_probe(&pdev->dev, ret,
- "irq request error (int_req)\n");
+ dev_err_probe(dev, ret, "irq request error (int_req)\n");
+ goto err_release_dma_chs;
}
if (!rz_ssi_is_dma_enabled(ssi)) {
@@ -1115,11 +1153,11 @@ static int rz_ssi_probe(struct platform_device *pdev)
if (ssi->irq_rt < 0)
return ssi->irq_rt;
- ret = devm_request_irq(&pdev->dev, ssi->irq_rt,
+ ret = devm_request_irq(dev, ssi->irq_rt,
&rz_ssi_interrupt, 0,
- dev_name(&pdev->dev), ssi);
+ dev_name(dev), ssi);
if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
+ return dev_err_probe(dev, ret,
"irq request error (dma_rt)\n");
} else {
if (ssi->irq_tx < 0)
@@ -1128,52 +1166,48 @@ static int rz_ssi_probe(struct platform_device *pdev)
if (ssi->irq_rx < 0)
return ssi->irq_rx;
- ret = devm_request_irq(&pdev->dev, ssi->irq_tx,
+ ret = devm_request_irq(dev, ssi->irq_tx,
&rz_ssi_interrupt, 0,
- dev_name(&pdev->dev), ssi);
+ dev_name(dev), ssi);
if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
+ return dev_err_probe(dev, ret,
"irq request error (dma_tx)\n");
- ret = devm_request_irq(&pdev->dev, ssi->irq_rx,
+ ret = devm_request_irq(dev, ssi->irq_rx,
&rz_ssi_interrupt, 0,
- dev_name(&pdev->dev), ssi);
+ dev_name(dev), ssi);
if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
+ return dev_err_probe(dev, ret,
"irq request error (dma_rx)\n");
}
}
- ssi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ ssi->rstc = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(ssi->rstc)) {
ret = PTR_ERR(ssi->rstc);
- goto err_reset;
+ goto err_release_dma_chs;
}
- reset_control_deassert(ssi->rstc);
- pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_resume_and_get(&pdev->dev);
+ /* Default 0 for power saving. Can be overridden via sysfs. */
+ pm_runtime_set_autosuspend_delay(dev, 0);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret < 0) {
- dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
- goto err_pm;
+ dev_err(dev, "Failed to enable runtime PM!\n");
+ goto err_release_dma_chs;
}
- ret = devm_snd_soc_register_component(&pdev->dev, &rz_ssi_soc_component,
+ ret = devm_snd_soc_register_component(dev, &rz_ssi_soc_component,
rz_ssi_soc_dai,
ARRAY_SIZE(rz_ssi_soc_dai));
if (ret < 0) {
- dev_err(&pdev->dev, "failed to register snd component\n");
- goto err_snd_soc;
+ dev_err(dev, "failed to register snd component\n");
+ goto err_release_dma_chs;
}
return 0;
-err_snd_soc:
- pm_runtime_put(ssi->dev);
-err_pm:
- pm_runtime_disable(ssi->dev);
- reset_control_assert(ssi->rstc);
-err_reset:
+err_release_dma_chs:
rz_ssi_release_dma_channels(ssi);
return ret;
@@ -1185,8 +1219,6 @@ static void rz_ssi_remove(struct platform_device *pdev)
rz_ssi_release_dma_channels(ssi);
- pm_runtime_put(ssi->dev);
- pm_runtime_disable(ssi->dev);
reset_control_assert(ssi->rstc);
}
@@ -1196,10 +1228,30 @@ static const struct of_device_id rz_ssi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rz_ssi_of_match);
+static int rz_ssi_runtime_suspend(struct device *dev)
+{
+ struct rz_ssi_priv *ssi = dev_get_drvdata(dev);
+
+ return reset_control_assert(ssi->rstc);
+}
+
+static int rz_ssi_runtime_resume(struct device *dev)
+{
+ struct rz_ssi_priv *ssi = dev_get_drvdata(dev);
+
+ return reset_control_deassert(ssi->rstc);
+}
+
+static const struct dev_pm_ops rz_ssi_pm_ops = {
+ RUNTIME_PM_OPS(rz_ssi_runtime_suspend, rz_ssi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
static struct platform_driver rz_ssi_driver = {
.driver = {
.name = "rz-ssi-pcm-audio",
.of_match_table = rz_ssi_of_match,
+ .pm = pm_ptr(&rz_ssi_pm_ops),
},
.probe = rz_ssi_probe,
.remove = rz_ssi_remove,
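
The open-coded busy-wait removed from rz_ssi_set_idle() is replaced by readl_poll_timeout_atomic() from <linux/iopoll.h>, with the same budget: the old loop ran up to 100 iterations of udelay(1), and the new call polls every 1 us with a 100 us total timeout, returning -ETIMEDOUT on expiry. A minimal sketch of the pattern, with a hypothetical status register and bit name:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define HYP_STATUS_IDLE		BIT(0)	/* assumed idle-status bit */

/* returns 0 once the idle bit is set, -ETIMEDOUT after roughly 100 us */
static int hyp_wait_idle(void __iomem *status_reg)
{
	u32 val;

	return readl_poll_timeout_atomic(status_reg, val,
					 val & HYP_STATUS_IDLE, 1, 100);
}
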
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
index d1f28699652f..bd0dc586e24a 100644
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
@@ -514,33 +514,6 @@ static void rockchip_i2s_tdm_xfer_resume(struct snd_pcm_substream *substream,
I2S_XFER_RXS_START);
}
-static int rockchip_i2s_ch_to_io(unsigned int ch, bool substream_capture)
-{
- if (substream_capture) {
- switch (ch) {
- case I2S_CHN_4:
- return I2S_IO_6CH_OUT_4CH_IN;
- case I2S_CHN_6:
- return I2S_IO_4CH_OUT_6CH_IN;
- case I2S_CHN_8:
- return I2S_IO_2CH_OUT_8CH_IN;
- default:
- return I2S_IO_8CH_OUT_2CH_IN;
- }
- } else {
- switch (ch) {
- case I2S_CHN_4:
- return I2S_IO_4CH_OUT_6CH_IN;
- case I2S_CHN_6:
- return I2S_IO_6CH_OUT_4CH_IN;
- case I2S_CHN_8:
- return I2S_IO_8CH_OUT_2CH_IN;
- default:
- return I2S_IO_2CH_OUT_8CH_IN;
- }
- }
-}
-
static int rockchip_i2s_io_multiplex(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -577,7 +550,6 @@ static int rockchip_i2s_io_multiplex(struct snd_pcm_substream *substream,
return -EINVAL;
}
- rockchip_i2s_ch_to_io(val, true);
} else {
struct snd_pcm_str *capture_str =
&substream->pcm->streams[SNDRV_PCM_STREAM_CAPTURE];
diff --git a/sound/soc/sdca/Makefile b/sound/soc/sdca/Makefile
index c296bd5a0a7c..5d1ddbbfbf62 100644
--- a/sound/soc/sdca/Makefile
+++ b/sound/soc/sdca/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-snd-soc-sdca-objs := sdca_functions.o sdca_device.o
+snd-soc-sdca-y := sdca_functions.o sdca_device.o
obj-$(CONFIG_SND_SOC_SDCA) += snd-soc-sdca.o
diff --git a/sound/soc/sdca/sdca_device.c b/sound/soc/sdca/sdca_device.c
index 80d663777eb5..b6399b773986 100644
--- a/sound/soc/sdca/sdca_device.c
+++ b/sound/soc/sdca/sdca_device.c
@@ -7,6 +7,8 @@
*/
#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/property.h>
#include <linux/soundwire/sdw.h>
#include <sound/sdca.h>
#include <sound/sdca_function.h>
diff --git a/sound/soc/sdca/sdca_functions.c b/sound/soc/sdca/sdca_functions.c
index 652865329968..38071bc838b9 100644
--- a/sound/soc/sdca/sdca_functions.c
+++ b/sound/soc/sdca/sdca_functions.c
@@ -6,86 +6,75 @@
* https://www.mipi.org/mipi-sdca-v1-0-download
*/
+#define dev_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/property.h>
#include <linux/soundwire/sdw.h>
+#include <linux/types.h>
#include <sound/sdca.h>
#include <sound/sdca_function.h>
-static int patch_sdca_function_type(struct device *dev,
- u32 interface_revision,
- u32 *function_type,
- const char **function_name)
+static int patch_sdca_function_type(u32 interface_revision, u32 *function_type)
{
- unsigned long function_type_patch = 0;
-
/*
* Unfortunately early SDCA specifications used different indices for Functions,
* for backwards compatibility we have to reorder the values found
*/
- if (interface_revision >= 0x0801)
- goto skip_early_draft_order;
-
- switch (*function_type) {
- case 1:
- function_type_patch = SDCA_FUNCTION_TYPE_SMART_AMP;
- break;
- case 2:
- function_type_patch = SDCA_FUNCTION_TYPE_SMART_MIC;
- break;
- case 3:
- function_type_patch = SDCA_FUNCTION_TYPE_SPEAKER_MIC;
- break;
- case 4:
- function_type_patch = SDCA_FUNCTION_TYPE_UAJ;
- break;
- case 5:
- function_type_patch = SDCA_FUNCTION_TYPE_RJ;
- break;
- case 6:
- function_type_patch = SDCA_FUNCTION_TYPE_HID;
- break;
- default:
- dev_warn(dev, "%s: SDCA version %#x unsupported function type %d, skipped\n",
- __func__, interface_revision, *function_type);
- return -EINVAL;
+ if (interface_revision < 0x0801) {
+ switch (*function_type) {
+ case 1:
+ *function_type = SDCA_FUNCTION_TYPE_SMART_AMP;
+ break;
+ case 2:
+ *function_type = SDCA_FUNCTION_TYPE_SMART_MIC;
+ break;
+ case 3:
+ *function_type = SDCA_FUNCTION_TYPE_SPEAKER_MIC;
+ break;
+ case 4:
+ *function_type = SDCA_FUNCTION_TYPE_UAJ;
+ break;
+ case 5:
+ *function_type = SDCA_FUNCTION_TYPE_RJ;
+ break;
+ case 6:
+ *function_type = SDCA_FUNCTION_TYPE_HID;
+ break;
+ default:
+ return -EINVAL;
+ }
}
-skip_early_draft_order:
- if (function_type_patch)
- *function_type = function_type_patch;
+ return 0;
+}
- /* now double-check the values */
- switch (*function_type) {
+static const char *get_sdca_function_name(u32 function_type)
+{
+ switch (function_type) {
case SDCA_FUNCTION_TYPE_SMART_AMP:
- *function_name = SDCA_FUNCTION_TYPE_SMART_AMP_NAME;
- break;
+ return SDCA_FUNCTION_TYPE_SMART_AMP_NAME;
case SDCA_FUNCTION_TYPE_SMART_MIC:
- *function_name = SDCA_FUNCTION_TYPE_SMART_MIC_NAME;
- break;
+ return SDCA_FUNCTION_TYPE_SMART_MIC_NAME;
case SDCA_FUNCTION_TYPE_UAJ:
- *function_name = SDCA_FUNCTION_TYPE_UAJ_NAME;
- break;
+ return SDCA_FUNCTION_TYPE_UAJ_NAME;
case SDCA_FUNCTION_TYPE_HID:
- *function_name = SDCA_FUNCTION_TYPE_HID_NAME;
- break;
+ return SDCA_FUNCTION_TYPE_HID_NAME;
case SDCA_FUNCTION_TYPE_SIMPLE_AMP:
+ return SDCA_FUNCTION_TYPE_SIMPLE_AMP_NAME;
case SDCA_FUNCTION_TYPE_SIMPLE_MIC:
+ return SDCA_FUNCTION_TYPE_SIMPLE_MIC_NAME;
case SDCA_FUNCTION_TYPE_SPEAKER_MIC:
+ return SDCA_FUNCTION_TYPE_SPEAKER_MIC_NAME;
case SDCA_FUNCTION_TYPE_RJ:
+ return SDCA_FUNCTION_TYPE_RJ_NAME;
case SDCA_FUNCTION_TYPE_IMP_DEF:
- dev_warn(dev, "%s: found unsupported SDCA function type %d, skipped\n",
- __func__, *function_type);
- return -EINVAL;
+ return SDCA_FUNCTION_TYPE_IMP_DEF_NAME;
default:
- dev_err(dev, "%s: found invalid SDCA function type %d, skipped\n",
- __func__, *function_type);
- return -EINVAL;
+ return NULL;
}
-
- dev_info(dev, "%s: found SDCA function %s (type %d)\n",
- __func__, *function_name, *function_type);
-
- return 0;
}
static int find_sdca_function(struct acpi_device *adev, void *data)
@@ -101,21 +90,16 @@ static int find_sdca_function(struct acpi_device *adev, void *data)
int ret;
if (sdca_data->num_functions >= SDCA_MAX_FUNCTION_COUNT) {
- dev_err(dev, "%s: maximum number of functions exceeded\n", __func__);
+ dev_err(dev, "maximum number of functions exceeded\n");
return -EINVAL;
}
- /*
- * The number of functions cannot exceed 8, we could use
- * acpi_get_local_address() but the value is stored as u64 so
- * we might as well avoid casts and intermediate levels
- */
ret = acpi_get_local_u64_address(adev->handle, &addr);
if (ret < 0)
return ret;
- if (!addr) {
- dev_err(dev, "%s: no addr\n", __func__);
+ if (!addr || addr > 0x7) {
+ dev_err(dev, "invalid addr: 0x%llx\n", addr);
return -ENODEV;
}
@@ -140,15 +124,25 @@ static int find_sdca_function(struct acpi_device *adev, void *data)
fwnode_handle_put(control5);
if (ret < 0) {
- dev_err(dev, "%s: the function type can only be determined from ACPI information\n",
- __func__);
+ dev_err(dev, "function type only supported as DisCo constant\n");
return ret;
}
- ret = patch_sdca_function_type(dev, sdca_data->interface_revision,
- &function_type, &function_name);
- if (ret < 0)
+ ret = patch_sdca_function_type(sdca_data->interface_revision, &function_type);
+ if (ret < 0) {
+ dev_err(dev, "SDCA version %#x invalid function type %d\n",
+ sdca_data->interface_revision, function_type);
return ret;
+ }
+
+ function_name = get_sdca_function_name(function_type);
+ if (!function_name) {
+ dev_err(dev, "invalid SDCA function type %d\n", function_type);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "SDCA function %s (type %d) at 0x%llx\n",
+ function_name, function_type, addr);
/* store results */
func_index = sdca_data->num_functions;
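
The `#define dev_fmt(fmt) "%s: " fmt, __func__` added at the top of sdca_functions.c is what lets the individual messages drop their explicit "%s: ..., __func__" arguments: the dev_err()/dev_warn() macros expand dev_fmt() at every call site, so the function-name prefix is applied automatically. A small sketch with a hypothetical function, assuming the define is placed before any includes as in the hunk above:

#define dev_fmt(fmt) "%s: " fmt, __func__

#include <linux/dev_printk.h>
#include <linux/device.h>

/* logs e.g. "<driver> <device>: hyp_check_addr: invalid addr: 0x9" */
static void hyp_check_addr(struct device *dev, unsigned long long addr)
{
	if (!addr || addr > 0x7)
		dev_err(dev, "invalid addr: 0x%llx\n", addr);
}
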
diff --git a/sound/soc/sdw_utils/soc_sdw_cs_amp.c b/sound/soc/sdw_utils/soc_sdw_cs_amp.c
index a0bb626c5cb8..4b6181cf2971 100644
--- a/sound/soc/sdw_utils/soc_sdw_cs_amp.c
+++ b/sound/soc/sdw_utils/soc_sdw_cs_amp.c
@@ -15,6 +15,7 @@
#include <sound/soc_sdw_utils.h>
#define CODEC_NAME_SIZE 8
+#define CS_AMP_CHANNELS_PER_AMP 4
int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai)
{
@@ -48,6 +49,51 @@ int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai
}
EXPORT_SYMBOL_NS(asoc_sdw_cs_spk_rtd_init, "SND_SOC_SDW_UTILS");
+int asoc_sdw_cs_spk_feedback_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai)
+{
+ const struct snd_soc_dai_link *dai_link = rtd->dai_link;
+ const struct snd_soc_dai_link_ch_map *ch_map;
+ const struct snd_soc_dai_link_component *codec_dlc;
+ struct snd_soc_dai *codec_dai;
+ u8 ch_slot[8] = {};
+ unsigned int amps_per_bus, ch_per_amp, mask;
+ int i, ret;
+
+ WARN_ON(dai_link->num_cpus > ARRAY_SIZE(ch_slot));
+
+ /*
+ * CS35L56 has 4 TX channels. When the capture is aggregated, the
+ * same bus slots are allocated to all the amps on a bus. Only one
+ * amp on that bus can be transmitting in each slot, so divide the
+ * available 4 slots between all the amps on the bus.
+ */
+ amps_per_bus = dai_link->num_codecs / dai_link->num_cpus;
+ if ((amps_per_bus == 0) || (amps_per_bus > CS_AMP_CHANNELS_PER_AMP)) {
+ dev_err(rtd->card->dev, "Illegal num_codecs:%u / num_cpus:%u\n",
+ dai_link->num_codecs, dai_link->num_cpus);
+ return -EINVAL;
+ }
+
+ ch_per_amp = CS_AMP_CHANNELS_PER_AMP / amps_per_bus;
+
+ for_each_rtd_ch_maps(rtd, i, ch_map) {
+ codec_dlc = snd_soc_link_to_codec(rtd->dai_link, i);
+ codec_dai = snd_soc_find_dai(codec_dlc);
+ mask = GENMASK(ch_per_amp - 1, 0) << ch_slot[ch_map->cpu];
+
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0, mask, 4, 32);
+ if (ret < 0) {
+ dev_err(rtd->card->dev, "Failed to set TDM slot:%d\n", ret);
+ return ret;
+ }
+
+ ch_slot[ch_map->cpu] += ch_per_amp;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS(asoc_sdw_cs_spk_feedback_rtd_init, "SND_SOC_SDW_UTILS");
+
int asoc_sdw_cs_amp_init(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_links,
struct asoc_sdw_codec_info *info,
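
The slot split in asoc_sdw_cs_spk_feedback_rtd_init() is easiest to see with concrete numbers: with 4 TX slots per bus and, say, 2 amps aggregated on one bus, each amp gets 4 / 2 = 2 slots, so the first amp transmits in slots 0-1 (mask 0x3) and the second in slots 2-3 (mask 0xc). A hedged sketch of the same arithmetic as a stand-alone helper (hypothetical name, with the amp index passed in instead of the running ch_slot[] counter used above):

#include <linux/bits.h>

#define HYP_CS_AMP_TX_CHANNELS	4	/* TX slots per bus, as above */

/* TX slot mask for the Nth amp on a bus shared by amps_per_bus amps (1..4) */
static unsigned int hyp_cs_amp_tx_mask(unsigned int amp_index_on_bus,
				       unsigned int amps_per_bus)
{
	unsigned int ch_per_amp = HYP_CS_AMP_TX_CHANNELS / amps_per_bus;

	return GENMASK(ch_per_amp - 1, 0) << (amp_index_on_bus * ch_per_amp);
}
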
diff --git a/sound/soc/sdw_utils/soc_sdw_utils.c b/sound/soc/sdw_utils/soc_sdw_utils.c
index 937fa3ce59df..6ee7d30b8ece 100644
--- a/sound/soc/sdw_utils/soc_sdw_utils.c
+++ b/sound/soc/sdw_utils/soc_sdw_utils.c
@@ -488,10 +488,10 @@ struct asoc_sdw_codec_info codec_info_list[] = {
.part_id = 0x3556,
.dais = {
{
- .direction = {true, true},
+ .direction = {true, false},
.dai_name = "cs35l56-sdw1",
.dai_type = SOC_SDW_DAI_TYPE_AMP,
- .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_AMP_IN_DAI_ID},
+ .dailink = {SOC_SDW_AMP_OUT_DAI_ID, SOC_SDW_UNUSED_DAI_ID},
.init = asoc_sdw_cs_amp_init,
.rtd_init = asoc_sdw_cs_spk_rtd_init,
.controls = generic_spk_controls,
@@ -499,8 +499,15 @@ struct asoc_sdw_codec_info codec_info_list[] = {
.widgets = generic_spk_widgets,
.num_widgets = ARRAY_SIZE(generic_spk_widgets),
},
+ {
+ .direction = {false, true},
+ .dai_name = "cs35l56-sdw1c",
+ .dai_type = SOC_SDW_DAI_TYPE_AMP,
+ .dailink = {SOC_SDW_UNUSED_DAI_ID, SOC_SDW_AMP_IN_DAI_ID},
+ .rtd_init = asoc_sdw_cs_spk_feedback_rtd_init,
+ },
},
- .dai_num = 1,
+ .dai_num = 2,
},
{
.part_id = 0x4242,
diff --git a/sound/soc/soc-card.c b/sound/soc/soc-card.c
index 8e9546fe7428..e6eb71b3010a 100644
--- a/sound/soc/soc-card.c
+++ b/sound/soc/soc-card.c
@@ -219,7 +219,7 @@ int snd_soc_card_set_bias_level(struct snd_soc_card *card,
{
int ret = 0;
- if (card && card->set_bias_level)
+ if (card->set_bias_level)
ret = card->set_bias_level(card, dapm, level);
return soc_card_ret(card, ret);
@@ -231,7 +231,7 @@ int snd_soc_card_set_bias_level_post(struct snd_soc_card *card,
{
int ret = 0;
- if (card && card->set_bias_level_post)
+ if (card->set_bias_level_post)
ret = card->set_bias_level_post(card, dapm, level);
return soc_card_ret(card, ret);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a1dace4bb616..3c6d8aef4130 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1449,23 +1449,46 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
{
struct snd_soc_dai *cpu_dai;
struct snd_soc_dai *codec_dai;
+ unsigned int ext_fmt;
unsigned int i;
int ret;
if (!dai_fmt)
return 0;
+ /*
+ * dai_fmt has 4 types
+ * 1. SND_SOC_DAIFMT_FORMAT_MASK
+ * 2. SND_SOC_DAIFMT_CLOCK
+ * 3. SND_SOC_DAIFMT_INV
+ * 4. SND_SOC_DAIFMT_CLOCK_PROVIDER
+ *
+ * 4. CLOCK_PROVIDER is specified from the Codec perspective in dai_fmt, so it is flipped
+ * when this function calls set_fmt() for the CPU (CBx_CFx -> Bx_Cx); see below.
+ * This means, for example, that the CPU and Codec cannot both be clock consumers.
+ * The new scheme handles 4. in each dai->ext_fmt instead, which keeps backwards compatibility.
+ *
+ * Legacy
+ * dai_fmt includes 1, 2, 3, 4
+ *
+ * New scheme
+ * dai_fmt includes 1, 2, 3
+ * ext_fmt includes 4
+ */
for_each_rtd_codec_dais(rtd, i, codec_dai) {
- ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt);
+ ext_fmt = rtd->dai_link->codecs[i].ext_fmt;
+ ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt | ext_fmt);
if (ret != 0 && ret != -ENOTSUPP)
return ret;
}
/* Flip the polarity for the "CPU" end of link */
+ /* Will effect only for 4. SND_SOC_DAIFMT_CLOCK_PROVIDER */
dai_fmt = snd_soc_daifmt_clock_provider_flipped(dai_fmt);
for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
- ret = snd_soc_dai_set_fmt(cpu_dai, dai_fmt);
+ ext_fmt = rtd->dai_link->cpus[i].ext_fmt;
+ ret = snd_soc_dai_set_fmt(cpu_dai, dai_fmt | ext_fmt);
if (ret != 0 && ret != -ENOTSUPP)
return ret;
}
@@ -1644,18 +1667,8 @@ static int soc_probe_component(struct snd_soc_card *card,
ret = snd_soc_dapm_add_routes(dapm,
component->driver->dapm_routes,
component->driver->num_dapm_routes);
- if (ret < 0) {
- if (card->disable_route_checks) {
- dev_info(card->dev,
- "%s: disable_route_checks set, ignoring errors on add_routes\n",
- __func__);
- } else {
- dev_err(card->dev,
- "%s: snd_soc_dapm_add_routes failed: %d\n",
- __func__, ret);
- goto err_probe;
- }
- }
+ if (ret < 0)
+ goto err_probe;
/* see for_each_card_components */
list_add(&component->card_list, &card->component_dev_list);
@@ -2234,18 +2247,8 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
ret = snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
card->num_dapm_routes);
- if (ret < 0) {
- if (card->disable_route_checks) {
- dev_info(card->dev,
- "%s: disable_route_checks set, ignoring errors on add_routes\n",
- __func__);
- } else {
- dev_err(card->dev,
- "%s: snd_soc_dapm_add_routes failed: %d\n",
- __func__, ret);
- goto probe_end;
- }
- }
+ if (ret < 0)
+ goto probe_end;
ret = snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes,
card->num_of_dapm_routes);
@@ -3389,6 +3392,9 @@ unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
char prop[128];
unsigned int bit, frame;
+ if (!np)
+ return 0;
+
if (!prefix)
prefix = "";
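
The ext_fmt handling added to snd_soc_runtime_set_dai_fmt() above means a machine driver can now express the clock-provider role (type 4) per DAI end instead of once, codec-relative, in dai_fmt. A minimal sketch of how a dai_link might use it, assuming hypothetical component and DAI names; the field itself (snd_soc_dai_link_component.ext_fmt) is the one referenced in the hunk above:

#include <sound/soc.h>

SND_SOC_DAILINK_DEFS(hyp_link,
	DAILINK_COMP_ARRAY(COMP_CPU("hyp-cpu-dai")),
	DAILINK_COMP_ARRAY(COMP_CODEC("hyp-codec.0", "hyp-codec-dai")),
	DAILINK_COMP_ARRAY(COMP_EMPTY()));

static struct snd_soc_dai_link hyp_dai_link = {
	.name = "hyp-link",
	.stream_name = "hyp-stream",
	/* shared part of the format: types 1-3 only */
	.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF,
	SND_SOC_DAILINK_REG(hyp_link),
};

static void hyp_link_setup_clock_roles(void)
{
	/*
	 * Type 4 per end: both CPU and codec are clock consumers here,
	 * a combination the single codec-relative dai_fmt could not
	 * describe because of the CPU-side flip.
	 */
	hyp_dai_link.cpus[0].ext_fmt   = SND_SOC_DAIFMT_CBC_CFC;
	hyp_dai_link.codecs[0].ext_fmt = SND_SOC_DAIFMT_CBC_CFC;
}
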
diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
index 34ba1a93a4c9..ca0308f6d41c 100644
--- a/sound/soc/soc-dai.c
+++ b/sound/soc/soc-dai.c
@@ -360,6 +360,22 @@ int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate)
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);
+int snd_soc_dai_prepare(struct snd_soc_dai *dai,
+ struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+
+ if (!snd_soc_dai_stream_valid(dai, substream->stream))
+ return 0;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->prepare)
+ ret = dai->driver->ops->prepare(substream, dai);
+
+ return soc_dai_ret(dai, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_prepare);
+
/**
* snd_soc_dai_digital_mute - configure DAI system or master clock.
* @dai: DAI
@@ -577,14 +593,9 @@ int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream)
int i, ret;
for_each_rtd_dais(rtd, i, dai) {
- if (!snd_soc_dai_stream_valid(dai, substream->stream))
- continue;
- if (dai->driver->ops &&
- dai->driver->ops->prepare) {
- ret = dai->driver->ops->prepare(substream, dai);
- if (ret < 0)
- return soc_dai_ret(dai, ret);
- }
+ ret = snd_soc_dai_prepare(dai, substream);
+ if (ret < 0)
+ return ret;
}
return 0;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 99521c784a9b..b5116b700d73 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -730,7 +730,7 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
if (ret != 0)
goto out;
- if (!card || dapm != &card->dapm)
+ if (dapm != &card->dapm)
ret = snd_soc_dapm_force_bias_level(dapm, level);
if (ret != 0)
@@ -4013,6 +4013,18 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
break;
case SND_SOC_DAPM_POST_PMU:
+ snd_soc_dapm_widget_for_each_source_path(w, path) {
+ source = path->source->priv;
+
+ snd_soc_dai_prepare(source, substream);
+ }
+
+ snd_soc_dapm_widget_for_each_sink_path(w, path) {
+ sink = path->sink->priv;
+
+ snd_soc_dai_prepare(sink, substream);
+ }
+
snd_soc_dapm_widget_for_each_sink_path(w, path) {
sink = path->sink->priv;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 1150455619aa..88b3ad5a2552 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -38,7 +38,6 @@ static inline int _soc_pcm_ret(struct snd_soc_pcm_runtime *rtd,
switch (ret) {
case -EPROBE_DEFER:
case -ENOTSUPP:
- case -EINVAL:
break;
default:
dev_err(rtd->dev,
@@ -986,7 +985,13 @@ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
}
out:
- return soc_pcm_ret(rtd, ret);
+ /*
+ * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
+ *
+ * We don't want to log an error since we do not want to give userspace a way to do a
+ * denial-of-service attack on the syslog / diskspace.
+ */
+ return ret;
}
/* PCM prepare ops for non-DPCM streams */
@@ -998,6 +1003,13 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
snd_soc_dpcm_mutex_lock(rtd);
ret = __soc_pcm_prepare(rtd, substream);
snd_soc_dpcm_mutex_unlock(rtd);
+
+ /*
+ * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
+ *
+ * We don't want to log an error since we do not want to give userspace a way to do a
+ * denial-of-service attack on the syslog / diskspace.
+ */
return ret;
}
@@ -2539,7 +2551,13 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
}
- return soc_pcm_ret(fe, ret);
+ /*
+ * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
+ *
+ * We don't want to log an error since we do not want to give userspace a way to do a
+ * denial-of-service attack on the syslog / diskspace.
+ */
+ return ret;
}
static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
@@ -2579,7 +2597,13 @@ out:
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
snd_soc_dpcm_mutex_unlock(fe);
- return soc_pcm_ret(fe, ret);
+ /*
+ * Don't use soc_pcm_ret() on .prepare callback to lower error log severity
+ *
+ * We don't want to log an error since we do not want to give userspace a way to do a
+ * denial-of-service attack on the syslog / diskspace.
+ */
+ return ret;
}
static int dpcm_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 43003d2d3666..9f4da061eff9 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1101,14 +1101,8 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
}
ret = snd_soc_dapm_add_routes(dapm, route, 1);
- if (ret) {
- if (!dapm->card->disable_route_checks) {
- dev_err(tplg->dev, "ASoC: dapm_add_routes failed: %d\n", ret);
- break;
- }
- dev_info(tplg->dev,
- "ASoC: disable_route_checks set, ignoring dapm_add_routes errors\n");
- }
+ if (ret)
+ break;
}
return ret;
diff --git a/sound/soc/sof/intel/atom.c b/sound/soc/sof/intel/atom.c
index 30e981c558c6..0d364bcdcfa9 100644
--- a/sound/soc/sof/intel/atom.c
+++ b/sound/soc/sof/intel/atom.c
@@ -78,20 +78,20 @@ void atom_dump(struct snd_sof_dev *sdev, u32 flags)
imrd = snd_sof_dsp_read64(sdev, DSP_BAR, SHIM_IMRD);
dev_err(sdev->dev,
"error: ipc host -> DSP: pending %s complete %s raw 0x%llx\n",
- (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
- (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ str_yes_no(panic & SHIM_IPCX_BUSY),
+ str_yes_no(panic & SHIM_IPCX_DONE), panic);
dev_err(sdev->dev,
"error: mask host: pending %s complete %s raw 0x%llx\n",
- (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
- (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ str_yes_no(imrx & SHIM_IMRX_BUSY),
+ str_yes_no(imrx & SHIM_IMRX_DONE), imrx);
dev_err(sdev->dev,
"error: ipc DSP -> host: pending %s complete %s raw 0x%llx\n",
- (status & SHIM_IPCD_BUSY) ? "yes" : "no",
- (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ str_yes_no(status & SHIM_IPCD_BUSY),
+ str_yes_no(status & SHIM_IPCD_DONE), status);
dev_err(sdev->dev,
"error: mask DSP: pending %s complete %s raw 0x%llx\n",
- (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
- (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+ str_yes_no(imrd & SHIM_IMRD_BUSY),
+ str_yes_no(imrd & SHIM_IMRD_DONE), imrd);
}
EXPORT_SYMBOL_NS(atom_dump, "SND_SOC_SOF_INTEL_ATOM_HIFI_EP");
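
str_yes_no(), used in the dump conversion above and in the bdw.c hunk that follows, comes from <linux/string_choices.h> and simply maps a true/false condition to the strings "yes"/"no", replacing the repeated ternaries. A tiny sketch with a hypothetical caller:

#include <linux/printk.h>
#include <linux/string_choices.h>
#include <linux/types.h>

static void hyp_report_ipc(bool busy, bool done)
{
	pr_info("pending %s complete %s\n", str_yes_no(busy), str_yes_no(done));
}
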
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
index c4d92f3508b6..5282c0071534 100644
--- a/sound/soc/sof/intel/bdw.c
+++ b/sound/soc/sof/intel/bdw.c
@@ -266,20 +266,20 @@ static void bdw_dump(struct snd_sof_dev *sdev, u32 flags)
imrd = snd_sof_dsp_read(sdev, BDW_DSP_BAR, SHIM_IMRD);
dev_err(sdev->dev,
"error: ipc host -> DSP: pending %s complete %s raw 0x%8.8x\n",
- (panic & SHIM_IPCX_BUSY) ? "yes" : "no",
- (panic & SHIM_IPCX_DONE) ? "yes" : "no", panic);
+ str_yes_no(panic & SHIM_IPCX_BUSY),
+ str_yes_no(panic & SHIM_IPCX_DONE), panic);
dev_err(sdev->dev,
"error: mask host: pending %s complete %s raw 0x%8.8x\n",
- (imrx & SHIM_IMRX_BUSY) ? "yes" : "no",
- (imrx & SHIM_IMRX_DONE) ? "yes" : "no", imrx);
+ str_yes_no(imrx & SHIM_IMRX_BUSY),
+ str_yes_no(imrx & SHIM_IMRX_DONE), imrx);
dev_err(sdev->dev,
"error: ipc DSP -> host: pending %s complete %s raw 0x%8.8x\n",
- (status & SHIM_IPCD_BUSY) ? "yes" : "no",
- (status & SHIM_IPCD_DONE) ? "yes" : "no", status);
+ str_yes_no(status & SHIM_IPCD_BUSY),
+ str_yes_no(status & SHIM_IPCD_DONE), status);
dev_err(sdev->dev,
"error: mask DSP: pending %s complete %s raw 0x%8.8x\n",
- (imrd & SHIM_IMRD_BUSY) ? "yes" : "no",
- (imrd & SHIM_IMRD_DONE) ? "yes" : "no", imrd);
+ str_yes_no(imrd & SHIM_IMRD_BUSY),
+ str_yes_no(imrd & SHIM_IMRD_DONE), imrd);
}
/*
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 0db2a3e554fb..da12aabc1bb8 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -503,6 +503,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
int ret;
int i;
+ if (!w) {
+ dev_err(cpu_dai->dev, "%s widget not found, check amp link num in the topology\n",
+ cpu_dai->name);
+ return -EINVAL;
+ }
+
ops = hda_dai_get_ops(substream, cpu_dai);
if (!ops) {
dev_err(cpu_dai->dev, "DAI widget ops not set\n");
@@ -582,6 +588,12 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
*/
for_each_rtd_cpu_dais(rtd, i, dai) {
w = snd_soc_dai_get_widget(dai, substream->stream);
+ if (!w) {
+ dev_err(cpu_dai->dev,
+ "%s widget not found, check amp link num in the topology\n",
+ dai->name);
+ return -EINVAL;
+ }
ipc4_copier = widget_to_copier(w);
memcpy(&ipc4_copier->dma_config_tlv[cpu_dai_id], dma_config_tlv,
sizeof(*dma_config_tlv));
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
index 5b5e484f9acf..1dd8d2092c3b 100644
--- a/sound/soc/sof/intel/hda-pcm.c
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -37,6 +37,11 @@ static bool hda_disable_rewinds;
module_param_named(disable_rewinds, hda_disable_rewinds, bool, 0444);
MODULE_PARM_DESC(disable_rewinds, "SOF HDA disable rewinds");
+static int hda_force_pause_support = -1;
+module_param_named(force_pause_support, hda_force_pause_support, int, 0444);
+MODULE_PARM_DESC(force_pause_support,
+ "Pause support: -1: Use default, 0: Disable, 1: Enable (default -1)");
+
u32 hda_dsp_get_mult_div(struct snd_sof_dev *sdev, int rate)
{
switch (rate) {
@@ -240,6 +245,16 @@ int hda_dsp_pcm_open(struct snd_sof_dev *sdev,
if (hda_always_enable_dmi_l1 && direction == SNDRV_PCM_STREAM_CAPTURE)
runtime->hw.info &= ~SNDRV_PCM_INFO_PAUSE;
+ /*
+ * Do not advertise the PAUSE support if it is forced to be disabled via
+ * module parameter or if the pause_supported is false for the PCM
+ * device
+ */
+ if (hda_force_pause_support == 0 ||
+ (hda_force_pause_support == -1 &&
+ !spcm->stream[substream->stream].pause_supported))
+ runtime->hw.info &= ~SNDRV_PCM_INFO_PAUSE;
+
if (hda_always_enable_dmi_l1 ||
direction == SNDRV_PCM_STREAM_PLAYBACK ||
spcm->stream[substream->stream].d0i3_compatible)
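
The force_pause_support handling in hda_dsp_pcm_open() reduces to a three-way decision; a hedged sketch of the same logic as a stand-alone predicate (hypothetical helper name), which should match the condition in the hunk above:

#include <linux/types.h>

/* true when SNDRV_PCM_INFO_PAUSE should stay advertised for this stream */
static bool hyp_pause_advertised(int force_pause_support, bool pause_supported)
{
	if (force_pause_support == 0)	/* force-disabled via module parameter */
		return false;
	if (force_pause_support == 1)	/* force-enabled via module parameter */
		return true;
	return pause_supported;		/* -1: follow the topology token */
}
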
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index f991785f727e..be689f6e10c8 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -63,6 +63,11 @@ static int sdw_params_stream(struct device *dev,
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(d, params_data->substream->stream);
struct snd_sof_dai_config_data data = { 0 };
+ if (!w) {
+ dev_err(dev, "%s widget not found, check amp link num in the topology\n",
+ d->name);
+ return -EINVAL;
+ }
data.dai_index = (params_data->link_id << 8) | d->id;
data.dai_data = params_data->alh_stream_id;
data.dai_node_id = data.dai_data;
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
index b55eb977e443..c04c62478827 100644
--- a/sound/soc/sof/ipc4-topology.c
+++ b/sound/soc/sof/ipc4-topology.c
@@ -2827,7 +2827,7 @@ static int sof_ipc4_widget_setup(struct snd_sof_dev *sdev, struct snd_sof_widget
msg->primary |= SOF_IPC4_MOD_INSTANCE(swidget->instance_id);
msg->extension &= ~SOF_IPC4_MOD_EXT_PARAM_SIZE_MASK;
- msg->extension |= ipc_size >> 2;
+ msg->extension |= SOF_IPC4_MOD_EXT_PARAM_SIZE(ipc_size >> 2);
msg->extension &= ~SOF_IPC4_MOD_EXT_PPL_ID_MASK;
msg->extension |= SOF_IPC4_MOD_EXT_PPL_ID(pipe_widget->instance_id);
diff --git a/sound/soc/sof/sof-audio.h b/sound/soc/sof/sof-audio.h
index 01b819dd8498..62f3c11a9216 100644
--- a/sound/soc/sof/sof-audio.h
+++ b/sound/soc/sof/sof-audio.h
@@ -332,6 +332,7 @@ struct snd_sof_pcm_stream {
struct work_struct period_elapsed_work;
struct snd_soc_dapm_widget_list *list; /* list of connected DAPM widgets */
bool d0i3_compatible; /* DSP can be in D0I3 when this pcm is opened */
+ bool pause_supported; /* PCM device supports PAUSE operation */
unsigned int dsp_max_burst_size_in_ms; /* The maximum size of the host DMA burst in ms */
/*
* flag to indicate that the DSP pipelines should be kept
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
index 843be3b6415d..abbb5ee7e08c 100644
--- a/sound/soc/sof/sof-priv.h
+++ b/sound/soc/sof/sof-priv.h
@@ -76,14 +76,6 @@ bool sof_debug_check_flag(int mask);
#define SOF_IPC_DSP_REPLY 0
#define SOF_IPC_HOST_REPLY 1
-/* convenience constructor for DAI driver streams */
-#define SOF_DAI_STREAM(sname, scmin, scmax, srates, sfmt) \
- {.stream_name = sname, .channels_min = scmin, .channels_max = scmax, \
- .rates = srates, .formats = sfmt}
-
-#define SOF_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | \
- SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_FLOAT)
-
/* So far the primary core on all DSPs has ID 0 */
#define SOF_DSP_PRIMARY_CORE 0
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index b3fca5fd87d6..688cc7ac1714 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -407,6 +407,10 @@ static const struct sof_topology_token stream_tokens[] = {
offsetof(struct snd_sof_pcm, stream[0].d0i3_compatible)},
{SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
offsetof(struct snd_sof_pcm, stream[1].d0i3_compatible)},
+ {SOF_TKN_STREAM_PLAYBACK_PAUSE_SUPPORTED, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[0].pause_supported)},
+ {SOF_TKN_STREAM_CAPTURE_PAUSE_SUPPORTED, SND_SOC_TPLG_TUPLE_TYPE_BOOL, get_token_u16,
+ offsetof(struct snd_sof_pcm, stream[1].pause_supported)},
};
/* Leds */
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 933a0913237c..886b3fa537d2 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -5,6 +5,7 @@
* Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com>
* Copyright 2015 Adam Sampson <ats@offog.org>
* Copyright 2016 Chen-Yu Tsai <wens@csie.org>
+ * Copyright 2018 Mesih Kilinc <mesihkilinc@gmail.com>
*
* Based on the Allwinner SDK driver, released under the GPL.
*/
@@ -265,6 +266,64 @@
/* TODO H3 DAP (Digital Audio Processing) bits */
+#define SUN4I_DMA_MAX_BURST (8)
+
+/* suniv specific registers */
+
+#define SUNIV_DMA_MAX_BURST (4)
+
+/* Codec DAC digital controls and FIFO registers */
+#define SUNIV_CODEC_ADC_FIFOC (0x10)
+#define SUNIV_CODEC_ADC_FIFOC_EN_AD (28)
+#define SUNIV_CODEC_ADC_FIFOS (0x14)
+#define SUNIV_CODEC_ADC_RXDATA (0x18)
+
+/* Output mixer and gain controls */
+#define SUNIV_CODEC_OM_DACA_CTRL (0x20)
+#define SUNIV_CODEC_OM_DACA_CTRL_DACAREN (31)
+#define SUNIV_CODEC_OM_DACA_CTRL_DACALEN (30)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXEN (29)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXEN (28)
+#define SUNIV_CODEC_OM_DACA_CTRL_RHPPAMUTE (27)
+#define SUNIV_CODEC_OM_DACA_CTRL_LHPPAMUTE (26)
+#define SUNIV_CODEC_OM_DACA_CTRL_RHPIS (25)
+#define SUNIV_CODEC_OM_DACA_CTRL_LHPIS (24)
+#define SUNIV_CODEC_OM_DACA_CTRL_HPCOM_CTL (22)
+#define SUNIV_CODEC_OM_DACA_CTRL_COMPTEN (21)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_MICIN (20)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_LINEIN (19)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_FMIN (18)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_RDAC (17)
+#define SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_LDAC (16)
+#define SUNIV_CODEC_OM_DACA_CTRL_HPPAEN (15)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_MICIN (12)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_LINEIN (11)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_FMIN (10)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_LDAC (9)
+#define SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_RDAC (8)
+#define SUNIV_CODEC_OM_DACA_CTRL_LTLNMUTE (7)
+#define SUNIV_CODEC_OM_DACA_CTRL_RTLNMUTE (6)
+#define SUNIV_CODEC_OM_DACA_CTRL_HPVOL (0)
+
+/* Analog Input Mixer controls */
+#define SUNIV_CODEC_ADC_ACTL (0x24)
+#define SUNIV_CODEC_ADC_ADCEN (31)
+#define SUNIV_CODEC_ADC_MICG (24)
+#define SUNIV_CODEC_ADC_LINEINVOL (21)
+#define SUNIV_CODEC_ADC_ADCG (16)
+#define SUNIV_CODEC_ADC_ADCMIX_MIC (13)
+#define SUNIV_CODEC_ADC_ADCMIX_FMINL (12)
+#define SUNIV_CODEC_ADC_ADCMIX_FMINR (11)
+#define SUNIV_CODEC_ADC_ADCMIX_LINEIN (10)
+#define SUNIV_CODEC_ADC_ADCMIX_LOUT (9)
+#define SUNIV_CODEC_ADC_ADCMIX_ROUT (8)
+#define SUNIV_CODEC_ADC_PASPEEDSELECT (7)
+#define SUNIV_CODEC_ADC_FMINVOL (4)
+#define SUNIV_CODEC_ADC_MICAMPEN (3)
+#define SUNIV_CODEC_ADC_MICBOOST (0)
+
+#define SUNIV_CODEC_ADC_DBG (0x4c)
+
struct sun4i_codec {
struct device *dev;
struct regmap *regmap;
@@ -1255,6 +1314,228 @@ static const struct snd_soc_component_driver sun8i_a23_codec_codec = {
.endianness = 1,
};
+/* suniv F1C100s codec */
+
+/* headphone controls */
+static const char * const suniv_codec_hp_src_enum_text[] = {
+ "DAC", "Mixer",
+};
+
+static SOC_ENUM_DOUBLE_DECL(suniv_codec_hp_src_enum,
+ SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LHPIS,
+ SUNIV_CODEC_OM_DACA_CTRL_RHPIS,
+ suniv_codec_hp_src_enum_text);
+
+static const struct snd_kcontrol_new suniv_codec_hp_src[] = {
+ SOC_DAPM_ENUM("Headphone Source Playback Route",
+ suniv_codec_hp_src_enum),
+};
+
+/* mixer controls */
+static const struct snd_kcontrol_new suniv_codec_adc_mixer_controls[] = {
+ SOC_DAPM_SINGLE("Right Out Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_ROUT, 1, 0),
+ SOC_DAPM_SINGLE("Left Out Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_LOUT, 1, 0),
+ SOC_DAPM_SINGLE("Line In Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_LINEIN, 1, 0),
+ SOC_DAPM_SINGLE("Right FM In Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_FMINR, 1, 0),
+ SOC_DAPM_SINGLE("Left FM In Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_FMINL, 1, 0),
+ SOC_DAPM_SINGLE("Mic Capture Switch", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCMIX_MIC, 1, 0),
+};
+
+static const struct snd_kcontrol_new suniv_codec_dac_lmixer_controls[] = {
+ SOC_DAPM_SINGLE("Right DAC Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_RDAC, 1, 0),
+ SOC_DAPM_SINGLE("Left DAC Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_LDAC, 1, 0),
+ SOC_DAPM_SINGLE("FM In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_FMIN, 1, 0),
+ SOC_DAPM_SINGLE("Line In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_LINEIN, 1, 0),
+ SOC_DAPM_SINGLE("Mic In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXMUTE_MICIN, 1, 0),
+};
+
+static const struct snd_kcontrol_new suniv_codec_dac_rmixer_controls[] = {
+ SOC_DAPM_SINGLE("Left DAC Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_LDAC, 1, 0),
+ SOC_DAPM_SINGLE("Right DAC Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_RDAC, 1, 0),
+ SOC_DAPM_SINGLE("FM In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_FMIN, 1, 0),
+ SOC_DAPM_SINGLE("Line In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_LINEIN, 1, 0),
+ SOC_DAPM_SINGLE("Mic In Playback Switch", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXMUTE_MICIN, 1, 0),
+};
+
+static const DECLARE_TLV_DB_SCALE(suniv_codec_dvol_scale, -7308, 116, 0);
+static const DECLARE_TLV_DB_SCALE(suniv_codec_hp_vol_scale, -6300, 100, 1);
+static const DECLARE_TLV_DB_SCALE(suniv_codec_out_mixer_pregain_scale,
+ -450, 150, 0);
+
+static const DECLARE_TLV_DB_RANGE(suniv_codec_mic_gain_scale,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 7, TLV_DB_SCALE_ITEM(2400, 300, 0),
+);
+
+static const struct snd_kcontrol_new suniv_codec_codec_widgets[] = {
+ SOC_SINGLE_TLV("DAC Playback Volume", SUN4I_CODEC_DAC_DPC,
+ SUN4I_CODEC_DAC_DPC_DVOL, 0x3f, 1,
+ suniv_codec_dvol_scale),
+ SOC_SINGLE_TLV("Headphone Playback Volume",
+ SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_HPVOL, 0x3f, 0,
+ suniv_codec_hp_vol_scale),
+ SOC_DOUBLE("Headphone Playback Switch",
+ SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LHPPAMUTE,
+ SUNIV_CODEC_OM_DACA_CTRL_RHPPAMUTE, 1, 0),
+ SOC_SINGLE_TLV("Line In Playback Volume",
+ SUNIV_CODEC_ADC_ACTL, SUNIV_CODEC_ADC_LINEINVOL,
+ 0x7, 0, suniv_codec_out_mixer_pregain_scale),
+ SOC_SINGLE_TLV("FM In Playback Volume",
+ SUNIV_CODEC_ADC_ACTL, SUNIV_CODEC_ADC_FMINVOL,
+ 0x7, 0, suniv_codec_out_mixer_pregain_scale),
+ SOC_SINGLE_TLV("Mic In Playback Volume",
+ SUNIV_CODEC_ADC_ACTL, SUNIV_CODEC_ADC_MICG,
+ 0x7, 0, suniv_codec_out_mixer_pregain_scale),
+
+ /* Microphone Amp boost gains */
+ SOC_SINGLE_TLV("Mic Boost Volume", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_MICBOOST, 0x7, 0,
+ suniv_codec_mic_gain_scale),
+ SOC_SINGLE_TLV("ADC Capture Volume",
+ SUNIV_CODEC_ADC_ACTL, SUNIV_CODEC_ADC_ADCG,
+ 0x7, 0, suniv_codec_out_mixer_pregain_scale),
+};
+
+static const struct snd_soc_dapm_widget suniv_codec_codec_dapm_widgets[] = {
+ /* Microphone inputs */
+ SND_SOC_DAPM_INPUT("MIC"),
+
+ /* Microphone Bias */
+ /* HBIAS and MBIAS bias widgets are not present on this variant */
+
+ /* Mic input path */
+ SND_SOC_DAPM_PGA("Mic Amplifier", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_MICAMPEN, 0, NULL, 0),
+
+ /* Line In */
+ SND_SOC_DAPM_INPUT("LINEIN"),
+
+ /* FM In */
+ SND_SOC_DAPM_INPUT("FMINR"),
+ SND_SOC_DAPM_INPUT("FMINL"),
+
+ /* Digital parts of the ADCs */
+ SND_SOC_DAPM_SUPPLY("ADC Enable", SUNIV_CODEC_ADC_FIFOC,
+ SUNIV_CODEC_ADC_FIFOC_EN_AD, 0,
+ NULL, 0),
+
+ /* Analog parts of the ADCs */
+ SND_SOC_DAPM_ADC("ADC", "Codec Capture", SUNIV_CODEC_ADC_ACTL,
+ SUNIV_CODEC_ADC_ADCEN, 0),
+
+ /* ADC Mixers */
+ SOC_MIXER_ARRAY("ADC Mixer", SUNIV_CODEC_ADC_ACTL,
+ SND_SOC_NOPM, 0,
+ suniv_codec_adc_mixer_controls),
+
+ /* Digital parts of the DACs */
+ SND_SOC_DAPM_SUPPLY("DAC Enable", SUN4I_CODEC_DAC_DPC,
+ SUN4I_CODEC_DAC_DPC_EN_DA, 0,
+ NULL, 0),
+
+ /* Analog parts of the DACs */
+ SND_SOC_DAPM_DAC("Left DAC", "Codec Playback",
+ SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_DACALEN, 0),
+ SND_SOC_DAPM_DAC("Right DAC", "Codec Playback",
+ SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_DACAREN, 0),
+
+ /* Mixers */
+ SOC_MIXER_ARRAY("Left Mixer", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_LMIXEN, 0,
+ suniv_codec_dac_lmixer_controls),
+ SOC_MIXER_ARRAY("Right Mixer", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_RMIXEN, 0,
+ suniv_codec_dac_rmixer_controls),
+
+ /* Headphone output path */
+ SND_SOC_DAPM_MUX("Headphone Source Playback Route",
+ SND_SOC_NOPM, 0, 0, suniv_codec_hp_src),
+ SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_HPPAEN, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("HPCOM Protection", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_COMPTEN, 0, NULL, 0),
+ SND_SOC_DAPM_REG(snd_soc_dapm_supply, "HPCOM", SUNIV_CODEC_OM_DACA_CTRL,
+ SUNIV_CODEC_OM_DACA_CTRL_HPCOM_CTL, 0x3, 0x3, 0),
+ SND_SOC_DAPM_OUTPUT("HP"),
+};
+
+static const struct snd_soc_dapm_route suniv_codec_codec_dapm_routes[] = {
+ /* DAC Routes */
+ { "Left DAC", NULL, "DAC Enable" },
+ { "Right DAC", NULL, "DAC Enable" },
+
+ /* Microphone Routes */
+ { "Mic Amplifier", NULL, "MIC"},
+
+ /* Left Mixer Routes */
+ { "Left Mixer", "Right DAC Playback Switch", "Right DAC" },
+ { "Left Mixer", "Left DAC Playback Switch", "Left DAC" },
+ { "Left Mixer", "FM In Playback Switch", "FMINL" },
+ { "Left Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Left Mixer", "Mic In Playback Switch", "Mic Amplifier" },
+
+ /* Right Mixer Routes */
+ { "Right Mixer", "Left DAC Playback Switch", "Left DAC" },
+ { "Right Mixer", "Right DAC Playback Switch", "Right DAC" },
+ { "Right Mixer", "FM In Playback Switch", "FMINR" },
+ { "Right Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Right Mixer", "Mic In Playback Switch", "Mic Amplifier" },
+
+ /* ADC Mixer Routes */
+ { "ADC Mixer", "Right Out Capture Switch", "Right Mixer" },
+ { "ADC Mixer", "Left Out Capture Switch", "Left Mixer" },
+ { "ADC Mixer", "Line In Capture Switch", "LINEIN" },
+ { "ADC Mixer", "Right FM In Capture Switch", "FMINR" },
+ { "ADC Mixer", "Left FM In Capture Switch", "FMINL" },
+ { "ADC Mixer", "Mic Capture Switch", "Mic Amplifier" },
+
+ /* Headphone Routes */
+ { "Headphone Source Playback Route", "DAC", "Left DAC" },
+ { "Headphone Source Playback Route", "DAC", "Right DAC" },
+ { "Headphone Source Playback Route", "Mixer", "Left Mixer" },
+ { "Headphone Source Playback Route", "Mixer", "Right Mixer" },
+ { "Headphone Amp", NULL, "Headphone Source Playback Route" },
+ { "HP", NULL, "Headphone Amp" },
+ { "HPCOM", NULL, "HPCOM Protection" },
+
+ /* ADC Routes */
+ { "ADC", NULL, "ADC Mixer" },
+ { "ADC", NULL, "ADC Enable" },
+};
+
+static const struct snd_soc_component_driver suniv_codec_codec = {
+ .controls = suniv_codec_codec_widgets,
+ .num_controls = ARRAY_SIZE(suniv_codec_codec_widgets),
+ .dapm_widgets = suniv_codec_codec_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(suniv_codec_codec_dapm_widgets),
+ .dapm_routes = suniv_codec_codec_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(suniv_codec_codec_dapm_routes),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+};
+
static const struct snd_soc_component_driver sun4i_codec_component = {
.name = "sun4i-codec",
.legacy_dai_naming = 1,
@@ -1701,6 +1982,56 @@ static struct snd_soc_card *sun50i_h616_codec_create_card(struct device *dev)
return card;
};
+static const struct snd_soc_dapm_widget suniv_codec_card_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone", NULL),
+ SND_SOC_DAPM_LINE("Line In", NULL),
+ SND_SOC_DAPM_LINE("Right FM In", NULL),
+ SND_SOC_DAPM_LINE("Left FM In", NULL),
+ SND_SOC_DAPM_MIC("Mic", NULL),
+ SND_SOC_DAPM_SPK("Speaker", sun4i_codec_spk_event),
+};
+
+/* Connect digital side enables to analog side widgets */
+static const struct snd_soc_dapm_route suniv_codec_card_routes[] = {
+ /* ADC Routes */
+ { "ADC", NULL, "ADC Enable" },
+ { "Codec Capture", NULL, "ADC" },
+
+ /* DAC Routes */
+ { "Left DAC", NULL, "DAC Enable" },
+ { "Right DAC", NULL, "DAC Enable" },
+ { "Left DAC", NULL, "Codec Playback" },
+ { "Right DAC", NULL, "Codec Playback" },
+};
+
+static struct snd_soc_card *suniv_codec_create_card(struct device *dev)
+{
+ struct snd_soc_card *card;
+ int ret;
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return ERR_PTR(-ENOMEM);
+
+ card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+ if (!card->dai_link)
+ return ERR_PTR(-ENOMEM);
+
+ card->dev = dev;
+ card->name = "F1C100s Audio Codec";
+ card->dapm_widgets = suniv_codec_card_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(suniv_codec_card_dapm_widgets);
+ card->dapm_routes = suniv_codec_card_routes;
+ card->num_dapm_routes = ARRAY_SIZE(suniv_codec_card_routes);
+ card->fully_routed = true;
+
+ ret = snd_soc_of_parse_audio_routing(card, "allwinner,audio-routing");
+ if (ret)
+ dev_warn(dev, "failed to parse audio-routing: %d\n", ret);
+
+ return card;
+};
+
static const struct regmap_config sun4i_codec_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -1751,6 +2082,13 @@ static const struct regmap_config sun50i_h616_codec_regmap_config = {
.cache_type = REGCACHE_NONE,
};
+static const struct regmap_config suniv_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUNIV_CODEC_ADC_DBG,
+};
+
struct sun4i_codec_quirks {
const struct regmap_config *regmap_config;
const struct snd_soc_component_driver *codec;
@@ -1761,6 +2099,7 @@ struct sun4i_codec_quirks {
unsigned int reg_adc_rxdata; /* RX FIFO offset for DMA config */
bool has_reset;
bool playback_only;
+ u32 dma_max_burst;
};
static const struct sun4i_codec_quirks sun4i_codec_quirks = {
@@ -1771,6 +2110,7 @@ static const struct sun4i_codec_quirks sun4i_codec_quirks = {
.reg_dac_fifoc = REG_FIELD(SUN4I_CODEC_DAC_FIFOC, 0, 31),
.reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN4I_CODEC_ADC_RXDATA,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun6i_a31_codec_quirks = {
@@ -1782,6 +2122,7 @@ static const struct sun4i_codec_quirks sun6i_a31_codec_quirks = {
.reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
.has_reset = true,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun7i_codec_quirks = {
@@ -1792,6 +2133,7 @@ static const struct sun4i_codec_quirks sun7i_codec_quirks = {
.reg_dac_fifoc = REG_FIELD(SUN4I_CODEC_DAC_FIFOC, 0, 31),
.reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN4I_CODEC_ADC_RXDATA,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun8i_a23_codec_quirks = {
@@ -1803,6 +2145,7 @@ static const struct sun4i_codec_quirks sun8i_a23_codec_quirks = {
.reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
.has_reset = true,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun8i_h3_codec_quirks = {
@@ -1819,6 +2162,7 @@ static const struct sun4i_codec_quirks sun8i_h3_codec_quirks = {
.reg_dac_txdata = SUN8I_H3_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
.has_reset = true,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun8i_v3s_codec_quirks = {
@@ -1834,6 +2178,7 @@ static const struct sun4i_codec_quirks sun8i_v3s_codec_quirks = {
.reg_dac_txdata = SUN8I_H3_CODEC_DAC_TXDATA,
.reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
.has_reset = true,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
};
static const struct sun4i_codec_quirks sun50i_h616_codec_quirks = {
@@ -1843,6 +2188,19 @@ static const struct sun4i_codec_quirks sun50i_h616_codec_quirks = {
.reg_dac_fifoc = REG_FIELD(SUN50I_H616_CODEC_DAC_FIFOC, 0, 31),
.reg_dac_txdata = SUN8I_H3_CODEC_DAC_TXDATA,
.has_reset = true,
+ .dma_max_burst = SUN4I_DMA_MAX_BURST,
+};
+
+static const struct sun4i_codec_quirks suniv_f1c100s_codec_quirks = {
+ .regmap_config = &suniv_codec_regmap_config,
+ .codec = &suniv_codec_codec,
+ .create_card = suniv_codec_create_card,
+ .reg_adc_fifoc = REG_FIELD(SUNIV_CODEC_ADC_FIFOC, 0, 31),
+ .reg_dac_fifoc = REG_FIELD(SUN4I_CODEC_DAC_FIFOC, 0, 31),
+ .reg_dac_txdata = SUN4I_CODEC_DAC_TXDATA,
+ .reg_adc_rxdata = SUNIV_CODEC_ADC_RXDATA,
+ .has_reset = true,
+ .dma_max_burst = SUNIV_DMA_MAX_BURST,
};
static const struct of_device_id sun4i_codec_of_match[] = {
@@ -1874,6 +2232,10 @@ static const struct of_device_id sun4i_codec_of_match[] = {
.compatible = "allwinner,sun50i-h616-codec",
.data = &sun50i_h616_codec_quirks,
},
+ {
+ .compatible = "allwinner,suniv-f1c100s-codec",
+ .data = &suniv_f1c100s_codec_quirks,
+ },
{}
};
MODULE_DEVICE_TABLE(of, sun4i_codec_of_match);
@@ -1911,7 +2273,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
}
/* Get the clocks from the DT */
- scodec->clk_apb = devm_clk_get(&pdev->dev, "apb");
+ scodec->clk_apb = devm_clk_get_enabled(&pdev->dev, "apb");
if (IS_ERR(scodec->clk_apb)) {
dev_err(&pdev->dev, "Failed to get the APB clock\n");
return PTR_ERR(scodec->clk_apb);
@@ -1924,8 +2286,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
}
if (quirks->has_reset) {
- scodec->rst = devm_reset_control_get_exclusive(&pdev->dev,
- NULL);
+ scodec->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
if (IS_ERR(scodec->rst)) {
dev_err(&pdev->dev, "Failed to get reset control\n");
return PTR_ERR(scodec->rst);
@@ -1961,32 +2322,16 @@ static int sun4i_codec_probe(struct platform_device *pdev)
return ret;
}
- /* Enable the bus clock */
- if (clk_prepare_enable(scodec->clk_apb)) {
- dev_err(&pdev->dev, "Failed to enable the APB clock\n");
- return -EINVAL;
- }
-
- /* Deassert the reset control */
- if (scodec->rst) {
- ret = reset_control_deassert(scodec->rst);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to deassert the reset control\n");
- goto err_clk_disable;
- }
- }
-
/* DMA configuration for TX FIFO */
scodec->playback_dma_data.addr = res->start + quirks->reg_dac_txdata;
- scodec->playback_dma_data.maxburst = 8;
+ scodec->playback_dma_data.maxburst = quirks->dma_max_burst;
scodec->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
if (!quirks->playback_only) {
/* DMA configuration for RX FIFO */
scodec->capture_dma_data.addr = res->start +
quirks->reg_adc_rxdata;
- scodec->capture_dma_data.maxburst = 8;
+ scodec->capture_dma_data.maxburst = quirks->dma_max_burst;
scodec->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
}
@@ -1994,7 +2339,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
&sun4i_codec_dai, 1);
if (ret) {
dev_err(&pdev->dev, "Failed to register our codec\n");
- goto err_assert_reset;
+ return ret;
}
ret = devm_snd_soc_register_component(&pdev->dev,
@@ -2002,20 +2347,20 @@ static int sun4i_codec_probe(struct platform_device *pdev)
&dummy_cpu_dai, 1);
if (ret) {
dev_err(&pdev->dev, "Failed to register our DAI\n");
- goto err_assert_reset;
+ return ret;
}
ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
if (ret) {
dev_err(&pdev->dev, "Failed to register against DMAEngine\n");
- goto err_assert_reset;
+ return ret;
}
card = quirks->create_card(&pdev->dev);
if (IS_ERR(card)) {
ret = PTR_ERR(card);
dev_err(&pdev->dev, "Failed to create our card\n");
- goto err_assert_reset;
+ return ret;
}
snd_soc_card_set_drvdata(card, scodec);
@@ -2023,28 +2368,17 @@ static int sun4i_codec_probe(struct platform_device *pdev)
ret = snd_soc_register_card(card);
if (ret) {
dev_err_probe(&pdev->dev, ret, "Failed to register our card\n");
- goto err_assert_reset;
+ return ret;
}
return 0;
-
-err_assert_reset:
- if (scodec->rst)
- reset_control_assert(scodec->rst);
-err_clk_disable:
- clk_disable_unprepare(scodec->clk_apb);
- return ret;
}
static void sun4i_codec_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct sun4i_codec *scodec = snd_soc_card_get_drvdata(card);
snd_soc_unregister_card(card);
- if (scodec->rst)
- reset_control_assert(scodec->rst);
- clk_disable_unprepare(scodec->clk_apb);
}
static struct platform_driver sun4i_codec_driver = {
@@ -2063,4 +2397,5 @@ MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
MODULE_AUTHOR("Ryan Walklin <ryan@testtoast.com");
+MODULE_AUTHOR("Mesih Kilinc <mesikilinc@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c
index 0aa416423246..41caf1795d09 100644
--- a/sound/soc/sunxi/sun4i-spdif.c
+++ b/sound/soc/sunxi/sun4i-spdif.c
@@ -176,6 +176,7 @@ struct sun4i_spdif_quirks {
unsigned int reg_dac_txdata;
bool has_reset;
unsigned int val_fctl_ftx;
+ unsigned int mclk_multiplier;
};
struct sun4i_spdif_dev {
@@ -201,6 +202,10 @@ static void sun4i_spdif_configure(struct sun4i_spdif_dev *host)
regmap_update_bits(host->regmap, SUN4I_SPDIF_FCTL,
quirks->val_fctl_ftx, quirks->val_fctl_ftx);
+ /* Valid data at the MSB of TXFIFO Register */
+ regmap_update_bits(host->regmap, SUN4I_SPDIF_FCTL,
+ SUN4I_SPDIF_FCTL_TXIM, 0);
+
/* clear TX counter */
regmap_write(host->regmap, SUN4I_SPDIF_TXCNT, 0);
}
@@ -282,14 +287,17 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ host->dma_params_tx.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
fmt |= SUN4I_SPDIF_TXCFG_FMT16BIT;
+ host->dma_params_tx.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case SNDRV_PCM_FORMAT_S20_3LE:
fmt |= SUN4I_SPDIF_TXCFG_FMT20BIT;
break;
case SNDRV_PCM_FORMAT_S24_LE:
+ case SNDRV_PCM_FORMAT_S32_LE:
fmt |= SUN4I_SPDIF_TXCFG_FMT24BIT;
break;
default:
@@ -313,6 +321,7 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
default:
return -EINVAL;
}
+ mclk *= host->quirks->mclk_multiplier;
ret = clk_set_rate(host->spdif_clk, mclk);
if (ret < 0) {
@@ -321,9 +330,6 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
return ret;
}
- regmap_update_bits(host->regmap, SUN4I_SPDIF_FCTL,
- SUN4I_SPDIF_FCTL_TXIM, SUN4I_SPDIF_FCTL_TXIM);
-
switch (rate) {
case 22050:
case 24000:
@@ -347,6 +353,7 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream,
default:
return -EINVAL;
}
+ mclk_div *= host->quirks->mclk_multiplier;
reg_val = 0;
reg_val |= SUN4I_SPDIF_TXCFG_ASS;
@@ -522,9 +529,10 @@ static const struct regmap_config sun4i_spdif_regmap_config = {
#define SUN4I_RATES SNDRV_PCM_RATE_8000_192000
-#define SUN4I_FORMATS (SNDRV_PCM_FORMAT_S16_LE | \
- SNDRV_PCM_FORMAT_S20_3LE | \
- SNDRV_PCM_FORMAT_S24_LE)
+#define SUN4I_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
static struct snd_soc_dai_driver sun4i_spdif_dai = {
.playback = {
@@ -540,24 +548,28 @@ static struct snd_soc_dai_driver sun4i_spdif_dai = {
static const struct sun4i_spdif_quirks sun4i_a10_spdif_quirks = {
.reg_dac_txdata = SUN4I_SPDIF_TXFIFO,
.val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX,
+ .mclk_multiplier = 1,
};
static const struct sun4i_spdif_quirks sun6i_a31_spdif_quirks = {
.reg_dac_txdata = SUN4I_SPDIF_TXFIFO,
.val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX,
.has_reset = true,
+ .mclk_multiplier = 1,
};
static const struct sun4i_spdif_quirks sun8i_h3_spdif_quirks = {
.reg_dac_txdata = SUN8I_SPDIF_TXFIFO,
.val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX,
.has_reset = true,
+ .mclk_multiplier = 4,
};
static const struct sun4i_spdif_quirks sun50i_h6_spdif_quirks = {
.reg_dac_txdata = SUN8I_SPDIF_TXFIFO,
.val_fctl_ftx = SUN50I_H6_SPDIF_FCTL_FTX,
.has_reset = true,
+ .mclk_multiplier = 1,
};
static const struct of_device_id sun4i_spdif_of_match[] = {
diff --git a/sound/soc/xilinx/xlnx_spdif.c b/sound/soc/xilinx/xlnx_spdif.c
index 7febb3830dc2..017a64ab9f1e 100644
--- a/sound/soc/xilinx/xlnx_spdif.c
+++ b/sound/soc/xilinx/xlnx_spdif.c
@@ -248,41 +248,35 @@ static int xlnx_spdif_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->axi_clk = devm_clk_get(dev, "s_axi_aclk");
+ ctx->axi_clk = devm_clk_get_enabled(dev, "s_axi_aclk");
if (IS_ERR(ctx->axi_clk)) {
ret = PTR_ERR(ctx->axi_clk);
dev_err(dev, "failed to get s_axi_aclk(%d)\n", ret);
return ret;
}
- ret = clk_prepare_enable(ctx->axi_clk);
- if (ret) {
- dev_err(dev, "failed to enable s_axi_aclk(%d)\n", ret);
- return ret;
- }
ctx->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(ctx->base)) {
- ret = PTR_ERR(ctx->base);
- goto clk_err;
- }
+ if (IS_ERR(ctx->base))
+ return PTR_ERR(ctx->base);
+
ret = of_property_read_u32(node, "xlnx,spdif-mode", &ctx->mode);
if (ret < 0) {
dev_err(dev, "cannot get SPDIF mode\n");
- goto clk_err;
+ return ret;
}
if (ctx->mode) {
dai_drv = &xlnx_spdif_tx_dai;
} else {
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto clk_err;
+ return ret;
+
ret = devm_request_irq(dev, ret,
xlnx_spdifrx_irq_handler,
0, "XLNX_SPDIF_RX", ctx);
if (ret) {
dev_err(dev, "spdif rx irq request failed\n");
- ret = -ENODEV;
- goto clk_err;
+ return -ENODEV;
}
init_waitqueue_head(&ctx->chsts_q);
@@ -292,7 +286,7 @@ static int xlnx_spdif_probe(struct platform_device *pdev)
ret = of_property_read_u32(node, "xlnx,aud_clk_i", &ctx->aclk);
if (ret < 0) {
dev_err(dev, "cannot get aud_clk_i value\n");
- goto clk_err;
+ return ret;
}
dev_set_drvdata(dev, ctx);
@@ -301,22 +295,13 @@ static int xlnx_spdif_probe(struct platform_device *pdev)
dai_drv, 1);
if (ret) {
dev_err(dev, "SPDIF component registration failed\n");
- goto clk_err;
+ return ret;
}
writel(XSPDIF_SOFT_RESET_VALUE, ctx->base + XSPDIF_SOFT_RESET_REG);
dev_info(dev, "%s DAI registered\n", dai_drv->name);
-clk_err:
- clk_disable_unprepare(ctx->axi_clk);
- return ret;
-}
-
-static void xlnx_spdif_remove(struct platform_device *pdev)
-{
- struct spdif_dev_data *ctx = dev_get_drvdata(&pdev->dev);
-
- clk_disable_unprepare(ctx->axi_clk);
+ return 0;
}
static struct platform_driver xlnx_spdif_driver = {
@@ -325,7 +310,6 @@ static struct platform_driver xlnx_spdif_driver = {
.of_match_table = xlnx_spdif_of_match,
},
.probe = xlnx_spdif_probe,
- .remove = xlnx_spdif_remove,
};
module_platform_driver(xlnx_spdif_driver);
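Both clock conversions in this series (sun4i-codec above and xlnx_spdif here) lean on the same managed helper: devm_clk_get_enabled() looks the clock up, prepares and enables it, and registers a devres action that disables and unprepares it again on probe failure or unbind, which is why the manual error labels and the remove() cleanup can be dropped. A minimal sketch of the idiom, with a hypothetical driver name not taken from either patch, could look like this:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Acquire + prepare + enable; devres undoes all three automatically
	 * on probe failure and on driver unbind. */
	clk = devm_clk_get_enabled(&pdev->dev, "apb");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Failed to get the APB clock\n");

	/* No clk_disable_unprepare() needed in error paths or in remove() */
	return 0;
}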
diff --git a/tools/hv/.gitignore b/tools/hv/.gitignore
new file mode 100644
index 000000000000..0c5bc15d602f
--- /dev/null
+++ b/tools/hv/.gitignore
@@ -0,0 +1,3 @@
+hv_fcopy_uio_daemon
+hv_kvp_daemon
+hv_vss_daemon
diff --git a/tools/hv/hv_fcopy_uio_daemon.c b/tools/hv/hv_fcopy_uio_daemon.c
index 7a00f3066a98..0198321d14a2 100644
--- a/tools/hv/hv_fcopy_uio_daemon.c
+++ b/tools/hv/hv_fcopy_uio_daemon.c
@@ -35,8 +35,6 @@
#define WIN8_SRV_MINOR 1
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
-#define MAX_FOLDER_NAME 15
-#define MAX_PATH_LEN 15
#define FCOPY_UIO "/sys/bus/vmbus/devices/eb765408-105f-49b6-b4aa-c123b64d17d4/uio"
#define FCOPY_VER_COUNT 1
@@ -51,7 +49,7 @@ static const int fw_versions[] = {
#define HV_RING_SIZE 0x4000 /* 16KB ring buffer size */
-unsigned char desc[HV_RING_SIZE];
+static unsigned char desc[HV_RING_SIZE];
static int target_fd;
static char target_fname[PATH_MAX];
@@ -409,8 +407,8 @@ int main(int argc, char *argv[])
struct vmbus_br txbr, rxbr;
void *ring;
uint32_t len = HV_RING_SIZE;
- char uio_name[MAX_FOLDER_NAME] = {0};
- char uio_dev_path[MAX_PATH_LEN] = {0};
+ char uio_name[NAME_MAX] = {0};
+ char uio_dev_path[PATH_MAX] = {0};
static struct option long_options[] = {
{"help", no_argument, 0, 'h' },
@@ -468,8 +466,10 @@ int main(int argc, char *argv[])
*/
ret = pread(fcopy_fd, &tmp, sizeof(int), 0);
if (ret < 0) {
+ if (errno == EINTR || errno == EAGAIN)
+ continue;
syslog(LOG_ERR, "pread failed: %s", strerror(errno));
- continue;
+ goto close;
}
len = HV_RING_SIZE;
diff --git a/tools/hv/hv_get_dns_info.sh b/tools/hv/hv_get_dns_info.sh
index 058c17b46ffc..268521234d4b 100755
--- a/tools/hv/hv_get_dns_info.sh
+++ b/tools/hv/hv_get_dns_info.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
# This example script parses /etc/resolv.conf to retrieve DNS information.
# In the interest of keeping the KVP daemon code free of distro specific
@@ -10,4 +10,4 @@
# this script can be based on the Network Manager APIs for retrieving DNS
# entries.
-cat /etc/resolv.conf 2>/dev/null | awk '/^nameserver/ { print $2 }'
+exec awk '/^nameserver/ { print $2 }' /etc/resolv.conf 2>/dev/null
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index ae57bf69ad4a..04ba035d67e9 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -725,7 +725,7 @@ static void kvp_get_ipconfig_info(char *if_name,
* .
*/
- sprintf(cmd, KVP_SCRIPTS_PATH "%s", "hv_get_dns_info");
+ sprintf(cmd, "exec %s %s", KVP_SCRIPTS_PATH "hv_get_dns_info", if_name);
/*
* Execute the command to gather DNS info.
@@ -742,7 +742,7 @@ static void kvp_get_ipconfig_info(char *if_name,
* Enabled: DHCP enabled.
*/
- sprintf(cmd, KVP_SCRIPTS_PATH "%s %s", "hv_get_dhcp_info", if_name);
+ sprintf(cmd, "exec %s %s", KVP_SCRIPTS_PATH "hv_get_dhcp_info", if_name);
file = popen(cmd, "r");
if (file == NULL)
@@ -1606,8 +1606,9 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
* invoke the external script to do its magic.
*/
- str_len = snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s %s",
- "hv_set_ifconfig", if_filename, nm_filename);
+ str_len = snprintf(cmd, sizeof(cmd), "exec %s %s %s",
+ KVP_SCRIPTS_PATH "hv_set_ifconfig",
+ if_filename, nm_filename);
/*
* This is a little overcautious, but it's necessary to suppress some
* false warnings from gcc 8.0.1.
diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
index 440a91b35823..2f8baed2b8f7 100755
--- a/tools/hv/hv_set_ifconfig.sh
+++ b/tools/hv/hv_set_ifconfig.sh
@@ -81,7 +81,7 @@ echo "ONBOOT=yes" >> $1
cp $1 /etc/sysconfig/network-scripts/
-chmod 600 $2
+umask 0177
interface=$(echo $2 | awk -F - '{ print $2 }')
filename="${2##*/}"
diff --git a/tools/include/uapi/linux/stddef.h b/tools/include/uapi/linux/stddef.h
index bb6ea517efb5..c53cde425406 100644
--- a/tools/include/uapi/linux/stddef.h
+++ b/tools/include/uapi/linux/stddef.h
@@ -8,6 +8,13 @@
#define __always_inline __inline__
#endif
+/* Not all C++ standards support type declarations inside an anonymous union */
+#ifndef __cplusplus
+#define __struct_group_tag(TAG) TAG
+#else
+#define __struct_group_tag(TAG)
+#endif
+
/**
* __struct_group() - Create a mirrored named and anonymous struct
*
@@ -20,14 +27,14 @@
* and size: one anonymous and one named. The former's members can be used
* normally without sub-struct naming, and the latter can be used to
* reason about the start, end, and size of the group of struct members.
- * The named struct can also be explicitly tagged for layer reuse, as well
- * as both having struct attributes appended.
+ * The named struct can also be explicitly tagged for layer reuse (C only),
+ * as well as both having struct attributes appended.
*/
#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
union { \
struct { MEMBERS } ATTRS; \
- struct TAG { MEMBERS } ATTRS NAME; \
- }
+ struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
+ } ATTRS
/**
* __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
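For reference, __struct_group() overlays the same member list as both an anonymous struct and a named struct inside one union, so the members stay addressable directly and through NAME; with the change above the TAG is only emitted for C, since C++ rejects type definitions inside an anonymous union. A small sketch with hypothetical names (not taken from the patch) shows the C-only tagged layer reuse:

#include <linux/stddef.h>	/* __struct_group() */

struct pkt_hdr {
	__struct_group(pkt_hdr_fixed, fixed, /* no attrs */,
		unsigned char type;
		unsigned char flags;
		unsigned short len;
	);
	unsigned char payload[];	/* flexible tail outside the group */
};

/* Works in C because the tag 'struct pkt_hdr_fixed' is declared above;
 * under C++ the tag is dropped and only the named member 'fixed' remains. */
static inline void init_fixed(struct pkt_hdr_fixed *f)
{
	f->type = 0;
	f->flags = 0;
	f->len = 0;
}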
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
index 01ec01a90e76..eea29359a899 100644
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -556,10 +556,10 @@ class YnlFamily(SpecFamily):
if attr["type"] == 'nest':
nl_type |= Netlink.NLA_F_NESTED
attr_payload = b''
- sub_attrs = SpaceAttrs(self.attr_sets[space], value, search_attrs)
+ sub_space = attr['nested-attributes']
+ sub_attrs = SpaceAttrs(self.attr_sets[sub_space], value, search_attrs)
for subname, subvalue in value.items():
- attr_payload += self._add_attr(attr['nested-attributes'],
- subname, subvalue, sub_attrs)
+ attr_payload += self._add_attr(sub_space, subname, subvalue, sub_attrs)
elif attr["type"] == 'flag':
if not value:
# If value is absent or false then skip attribute creation.
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 4ce176ad411f..76060da755b5 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -3820,9 +3820,12 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
break;
case INSN_CONTEXT_SWITCH:
- if (func && (!next_insn || !next_insn->hint)) {
- WARN_INSN(insn, "unsupported instruction in callable function");
- return 1;
+ if (func) {
+ if (!next_insn || !next_insn->hint) {
+ WARN_INSN(insn, "unsupported instruction in callable function");
+ return 1;
+ }
+ break;
}
return 0;
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index f37614cc2c1b..b2174894f9f7 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -19,6 +19,7 @@ NORETURN(__x64_sys_exit_group)
NORETURN(arch_cpu_idle_dead)
NORETURN(bch2_trans_in_restart_error)
NORETURN(bch2_trans_restart_error)
+NORETURN(bch2_trans_unlocked_error)
NORETURN(cpu_bringup_and_idle)
NORETURN(cpu_startup_entry)
NORETURN(do_exit)
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 2f36b7b6418d..625f5b046776 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -40,9 +40,9 @@ void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_fl
void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
-bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym;
-void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym;
-void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym;
+bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
+void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
+void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
u32 scx_bpf_reenqueue_local(void) __ksym;
diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c
index 21deea320bd7..e938156ed0a0 100644
--- a/tools/sched_ext/scx_central.c
+++ b/tools/sched_ext/scx_central.c
@@ -97,7 +97,7 @@ restart:
SCX_BUG_ON(!cpuset, "Failed to allocate cpuset");
CPU_ZERO(cpuset);
CPU_SET(skel->rodata->central_cpu, cpuset);
- SCX_BUG_ON(sched_setaffinity(0, sizeof(cpuset), cpuset),
+ SCX_BUG_ON(sched_setaffinity(0, sizeof(*cpuset), cpuset),
"Failed to affinitize to central CPU %d (max %d)",
skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1);
CPU_FREE(cpuset);
diff --git a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h
new file mode 100644
index 000000000000..1bdfb79ef009
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __SOCKET_HELPERS__
+#define __SOCKET_HELPERS__
+
+#include <linux/vm_sockets.h>
+
+/* include/linux/net.h */
+#define SOCK_TYPE_MASK 0xf
+
+#define IO_TIMEOUT_SEC 30
+#define MAX_STRERR_LEN 256
+
+/* workaround for older vm_sockets.h */
+#ifndef VMADDR_CID_LOCAL
+#define VMADDR_CID_LOCAL 1
+#endif
+
+/* include/linux/cleanup.h */
+#define __get_and_null(p, nullvalue) \
+ ({ \
+ __auto_type __ptr = &(p); \
+ __auto_type __val = *__ptr; \
+ *__ptr = nullvalue; \
+ __val; \
+ })
+
+#define take_fd(fd) __get_and_null(fd, -EBADF)
+
+/* Wrappers that fail the test on error and report it. */
+
+#define _FAIL(errnum, fmt...) \
+ ({ \
+ error_at_line(0, (errnum), __func__, __LINE__, fmt); \
+ CHECK_FAIL(true); \
+ })
+#define FAIL(fmt...) _FAIL(0, fmt)
+#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt)
+#define FAIL_LIBBPF(err, msg) \
+ ({ \
+ char __buf[MAX_STRERR_LEN]; \
+ libbpf_strerror((err), __buf, sizeof(__buf)); \
+ FAIL("%s: %s", (msg), __buf); \
+ })
+
+
+#define xaccept_nonblock(fd, addr, len) \
+ ({ \
+ int __ret = \
+ accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \
+ if (__ret == -1) \
+ FAIL_ERRNO("accept"); \
+ __ret; \
+ })
+
+#define xbind(fd, addr, len) \
+ ({ \
+ int __ret = bind((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("bind"); \
+ __ret; \
+ })
+
+#define xclose(fd) \
+ ({ \
+ int __ret = close((fd)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("close"); \
+ __ret; \
+ })
+
+#define xconnect(fd, addr, len) \
+ ({ \
+ int __ret = connect((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("connect"); \
+ __ret; \
+ })
+
+#define xgetsockname(fd, addr, len) \
+ ({ \
+ int __ret = getsockname((fd), (addr), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("getsockname"); \
+ __ret; \
+ })
+
+#define xgetsockopt(fd, level, name, val, len) \
+ ({ \
+ int __ret = getsockopt((fd), (level), (name), (val), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("getsockopt(" #name ")"); \
+ __ret; \
+ })
+
+#define xlisten(fd, backlog) \
+ ({ \
+ int __ret = listen((fd), (backlog)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("listen"); \
+ __ret; \
+ })
+
+#define xsetsockopt(fd, level, name, val, len) \
+ ({ \
+ int __ret = setsockopt((fd), (level), (name), (val), (len)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("setsockopt(" #name ")"); \
+ __ret; \
+ })
+
+#define xsend(fd, buf, len, flags) \
+ ({ \
+ ssize_t __ret = send((fd), (buf), (len), (flags)); \
+ if (__ret == -1) \
+ FAIL_ERRNO("send"); \
+ __ret; \
+ })
+
+#define xrecv_nonblock(fd, buf, len, flags) \
+ ({ \
+ ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \
+ IO_TIMEOUT_SEC); \
+ if (__ret == -1) \
+ FAIL_ERRNO("recv"); \
+ __ret; \
+ })
+
+#define xsocket(family, sotype, flags) \
+ ({ \
+ int __ret = socket(family, sotype, flags); \
+ if (__ret == -1) \
+ FAIL_ERRNO("socket"); \
+ __ret; \
+ })
+
+static inline void close_fd(int *fd)
+{
+ if (*fd >= 0)
+ xclose(*fd);
+}
+
+#define __close_fd __attribute__((cleanup(close_fd)))
+
+static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss)
+{
+ return (struct sockaddr *)ss;
+}
+
+static inline void init_addr_loopback4(struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
+
+ addr4->sin_family = AF_INET;
+ addr4->sin_port = 0;
+ addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ *len = sizeof(*addr4);
+}
+
+static inline void init_addr_loopback6(struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss));
+
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_port = 0;
+ addr6->sin6_addr = in6addr_loopback;
+ *len = sizeof(*addr6);
+}
+
+static inline void init_addr_loopback_vsock(struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss));
+
+ addr->svm_family = AF_VSOCK;
+ addr->svm_port = VMADDR_PORT_ANY;
+ addr->svm_cid = VMADDR_CID_LOCAL;
+ *len = sizeof(*addr);
+}
+
+static inline void init_addr_loopback(int family, struct sockaddr_storage *ss,
+ socklen_t *len)
+{
+ switch (family) {
+ case AF_INET:
+ init_addr_loopback4(ss, len);
+ return;
+ case AF_INET6:
+ init_addr_loopback6(ss, len);
+ return;
+ case AF_VSOCK:
+ init_addr_loopback_vsock(ss, len);
+ return;
+ default:
+ FAIL("unsupported address family %d", family);
+ }
+}
+
+static inline int enable_reuseport(int s, int progfd)
+{
+ int err, one = 1;
+
+ err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
+ if (err)
+ return -1;
+ err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd,
+ sizeof(progfd));
+ if (err)
+ return -1;
+
+ return 0;
+}
+
+static inline int socket_loopback_reuseport(int family, int sotype, int progfd)
+{
+ struct sockaddr_storage addr;
+ socklen_t len = 0;
+ int err, s;
+
+ init_addr_loopback(family, &addr, &len);
+
+ s = xsocket(family, sotype, 0);
+ if (s == -1)
+ return -1;
+
+ if (progfd >= 0)
+ enable_reuseport(s, progfd);
+
+ err = xbind(s, sockaddr(&addr), len);
+ if (err)
+ goto close;
+
+ if (sotype & SOCK_DGRAM)
+ return s;
+
+ err = xlisten(s, SOMAXCONN);
+ if (err)
+ goto close;
+
+ return s;
+close:
+ xclose(s);
+ return -1;
+}
+
+static inline int socket_loopback(int family, int sotype)
+{
+ return socket_loopback_reuseport(family, sotype, -1);
+}
+
+static inline int poll_connect(int fd, unsigned int timeout_sec)
+{
+ struct timeval timeout = { .tv_sec = timeout_sec };
+ fd_set wfds;
+ int r, eval;
+ socklen_t esize = sizeof(eval);
+
+ FD_ZERO(&wfds);
+ FD_SET(fd, &wfds);
+
+ r = select(fd + 1, NULL, &wfds, NULL, &timeout);
+ if (r == 0)
+ errno = ETIME;
+ if (r != 1)
+ return -1;
+
+ if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &eval, &esize) < 0)
+ return -1;
+ if (eval != 0) {
+ errno = eval;
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline int poll_read(int fd, unsigned int timeout_sec)
+{
+ struct timeval timeout = { .tv_sec = timeout_sec };
+ fd_set rfds;
+ int r;
+
+ FD_ZERO(&rfds);
+ FD_SET(fd, &rfds);
+
+ r = select(fd + 1, &rfds, NULL, NULL, &timeout);
+ if (r == 0)
+ errno = ETIME;
+
+ return r == 1 ? 0 : -1;
+}
+
+static inline int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len,
+ unsigned int timeout_sec)
+{
+ if (poll_read(fd, timeout_sec))
+ return -1;
+
+ return accept(fd, addr, len);
+}
+
+static inline int recv_timeout(int fd, void *buf, size_t len, int flags,
+ unsigned int timeout_sec)
+{
+ if (poll_read(fd, timeout_sec))
+ return -1;
+
+ return recv(fd, buf, len, flags);
+}
+
+
+static inline int create_pair(int family, int sotype, int *p0, int *p1)
+{
+ __close_fd int s, c = -1, p = -1;
+ struct sockaddr_storage addr;
+ socklen_t len = sizeof(addr);
+ int err;
+
+ s = socket_loopback(family, sotype);
+ if (s < 0)
+ return s;
+
+ err = xgetsockname(s, sockaddr(&addr), &len);
+ if (err)
+ return err;
+
+ c = xsocket(family, sotype, 0);
+ if (c < 0)
+ return c;
+
+ err = connect(c, sockaddr(&addr), len);
+ if (err) {
+ if (errno != EINPROGRESS) {
+ FAIL_ERRNO("connect");
+ return err;
+ }
+
+ err = poll_connect(c, IO_TIMEOUT_SEC);
+ if (err) {
+ FAIL_ERRNO("poll_connect");
+ return err;
+ }
+ }
+
+ switch (sotype & SOCK_TYPE_MASK) {
+ case SOCK_DGRAM:
+ err = xgetsockname(c, sockaddr(&addr), &len);
+ if (err)
+ return err;
+
+ err = xconnect(s, sockaddr(&addr), len);
+ if (err)
+ return err;
+
+ *p0 = take_fd(s);
+ break;
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ p = xaccept_nonblock(s, NULL, NULL);
+ if (p < 0)
+ return p;
+
+ *p0 = take_fd(p);
+ break;
+ default:
+ FAIL("Unsupported socket type %#x", sotype);
+ return -EOPNOTSUPP;
+ }
+
+ *p1 = take_fd(c);
+ return 0;
+}
+
+static inline int create_socket_pairs(int family, int sotype, int *c0, int *c1,
+ int *p0, int *p1)
+{
+ int err;
+
+ err = create_pair(family, sotype, c0, p0);
+ if (err)
+ return err;
+
+ err = create_pair(family, sotype, c1, p1);
+ if (err) {
+ close(*c0);
+ close(*p0);
+ }
+
+ return err;
+}
+
+#endif // __SOCKET_HELPERS__
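The new header is meant to be shared by prog_tests that only need plain sockets (no sockmap): the x*() wrappers log and fail the current test on error, and create_pair() hands back a connected loopback pair. A hypothetical caller, assuming the usual test_progs harness is already included, might look like:

#include "socket_helpers.h"

static void check_loopback_pair(void)
{
	int c1 = -1, p1 = -1;
	char buf[4];

	/* p1 is the connecting end, c1 the accepted/server end */
	if (create_pair(AF_INET, SOCK_STREAM, &c1, &p1))
		return;

	xsend(p1, "ping", 4, 0);
	xrecv_nonblock(c1, buf, sizeof(buf), 0);

	xclose(c1);
	xclose(p1);
}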
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index 248754296d97..884ad87783d5 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -12,6 +12,7 @@
#include "test_sockmap_progs_query.skel.h"
#include "test_sockmap_pass_prog.skel.h"
#include "test_sockmap_drop_prog.skel.h"
+#include "test_sockmap_change_tail.skel.h"
#include "bpf_iter_sockmap.skel.h"
#include "sockmap_helpers.h"
@@ -643,6 +644,54 @@ out:
test_sockmap_drop_prog__destroy(drop);
}
+static void test_sockmap_skb_verdict_change_tail(void)
+{
+ struct test_sockmap_change_tail *skel;
+ int err, map, verdict;
+ int c1, p1, sent, recvd;
+ int zero = 0;
+ char buf[2];
+
+ skel = test_sockmap_change_tail__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+ verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+ map = bpf_map__fd(skel->maps.sock_map_rx);
+
+ err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+ err = create_pair(AF_INET, SOCK_STREAM, &c1, &p1);
+ if (!ASSERT_OK(err, "create_pair()"))
+ goto out;
+ err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
+ goto out_close;
+ sent = xsend(p1, "Tr", 2, 0);
+ ASSERT_EQ(sent, 2, "xsend(p1)");
+ recvd = recv(c1, buf, 2, 0);
+ ASSERT_EQ(recvd, 1, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");
+
+ sent = xsend(p1, "G", 1, 0);
+ ASSERT_EQ(sent, 1, "xsend(p1)");
+ recvd = recv(c1, buf, 2, 0);
+ ASSERT_EQ(recvd, 2, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");
+
+ sent = xsend(p1, "E", 1, 0);
+ ASSERT_EQ(sent, 1, "xsend(p1)");
+ recvd = recv(c1, buf, 1, 0);
+ ASSERT_EQ(recvd, 1, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret");
+
+out_close:
+ close(c1);
+ close(p1);
+out:
+ test_sockmap_change_tail__destroy(skel);
+}
+
static void test_sockmap_skb_verdict_peek_helper(int map)
{
int err, c1, p1, zero = 0, sent, recvd, avail;
@@ -1058,6 +1107,8 @@ void test_sockmap_basic(void)
test_sockmap_skb_verdict_fionread(true);
if (test__start_subtest("sockmap skb_verdict fionread on drop"))
test_sockmap_skb_verdict_fionread(false);
+ if (test__start_subtest("sockmap skb_verdict change tail"))
+ test_sockmap_skb_verdict_change_tail();
if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
test_sockmap_skb_verdict_peek();
if (test__start_subtest("sockmap skb_verdict msg_f_peek with link"))
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
index 38e35c72bdaa..3e5571dd578d 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
@@ -1,139 +1,12 @@
#ifndef __SOCKMAP_HELPERS__
#define __SOCKMAP_HELPERS__
-#include <linux/vm_sockets.h>
+#include "socket_helpers.h"
-/* include/linux/net.h */
-#define SOCK_TYPE_MASK 0xf
-
-#define IO_TIMEOUT_SEC 30
-#define MAX_STRERR_LEN 256
#define MAX_TEST_NAME 80
-/* workaround for older vm_sockets.h */
-#ifndef VMADDR_CID_LOCAL
-#define VMADDR_CID_LOCAL 1
-#endif
-
#define __always_unused __attribute__((__unused__))
-/* include/linux/cleanup.h */
-#define __get_and_null(p, nullvalue) \
- ({ \
- __auto_type __ptr = &(p); \
- __auto_type __val = *__ptr; \
- *__ptr = nullvalue; \
- __val; \
- })
-
-#define take_fd(fd) __get_and_null(fd, -EBADF)
-
-#define _FAIL(errnum, fmt...) \
- ({ \
- error_at_line(0, (errnum), __func__, __LINE__, fmt); \
- CHECK_FAIL(true); \
- })
-#define FAIL(fmt...) _FAIL(0, fmt)
-#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt)
-#define FAIL_LIBBPF(err, msg) \
- ({ \
- char __buf[MAX_STRERR_LEN]; \
- libbpf_strerror((err), __buf, sizeof(__buf)); \
- FAIL("%s: %s", (msg), __buf); \
- })
-
-/* Wrappers that fail the test on error and report it. */
-
-#define xaccept_nonblock(fd, addr, len) \
- ({ \
- int __ret = \
- accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \
- if (__ret == -1) \
- FAIL_ERRNO("accept"); \
- __ret; \
- })
-
-#define xbind(fd, addr, len) \
- ({ \
- int __ret = bind((fd), (addr), (len)); \
- if (__ret == -1) \
- FAIL_ERRNO("bind"); \
- __ret; \
- })
-
-#define xclose(fd) \
- ({ \
- int __ret = close((fd)); \
- if (__ret == -1) \
- FAIL_ERRNO("close"); \
- __ret; \
- })
-
-#define xconnect(fd, addr, len) \
- ({ \
- int __ret = connect((fd), (addr), (len)); \
- if (__ret == -1) \
- FAIL_ERRNO("connect"); \
- __ret; \
- })
-
-#define xgetsockname(fd, addr, len) \
- ({ \
- int __ret = getsockname((fd), (addr), (len)); \
- if (__ret == -1) \
- FAIL_ERRNO("getsockname"); \
- __ret; \
- })
-
-#define xgetsockopt(fd, level, name, val, len) \
- ({ \
- int __ret = getsockopt((fd), (level), (name), (val), (len)); \
- if (__ret == -1) \
- FAIL_ERRNO("getsockopt(" #name ")"); \
- __ret; \
- })
-
-#define xlisten(fd, backlog) \
- ({ \
- int __ret = listen((fd), (backlog)); \
- if (__ret == -1) \
- FAIL_ERRNO("listen"); \
- __ret; \
- })
-
-#define xsetsockopt(fd, level, name, val, len) \
- ({ \
- int __ret = setsockopt((fd), (level), (name), (val), (len)); \
- if (__ret == -1) \
- FAIL_ERRNO("setsockopt(" #name ")"); \
- __ret; \
- })
-
-#define xsend(fd, buf, len, flags) \
- ({ \
- ssize_t __ret = send((fd), (buf), (len), (flags)); \
- if (__ret == -1) \
- FAIL_ERRNO("send"); \
- __ret; \
- })
-
-#define xrecv_nonblock(fd, buf, len, flags) \
- ({ \
- ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \
- IO_TIMEOUT_SEC); \
- if (__ret == -1) \
- FAIL_ERRNO("recv"); \
- __ret; \
- })
-
-#define xsocket(family, sotype, flags) \
- ({ \
- int __ret = socket(family, sotype, flags); \
- if (__ret == -1) \
- FAIL_ERRNO("socket"); \
- __ret; \
- })
-
#define xbpf_map_delete_elem(fd, key) \
({ \
int __ret = bpf_map_delete_elem((fd), (key)); \
@@ -193,130 +66,6 @@
__ret; \
})
-static inline void close_fd(int *fd)
-{
- if (*fd >= 0)
- xclose(*fd);
-}
-
-#define __close_fd __attribute__((cleanup(close_fd)))
-
-static inline int poll_connect(int fd, unsigned int timeout_sec)
-{
- struct timeval timeout = { .tv_sec = timeout_sec };
- fd_set wfds;
- int r, eval;
- socklen_t esize = sizeof(eval);
-
- FD_ZERO(&wfds);
- FD_SET(fd, &wfds);
-
- r = select(fd + 1, NULL, &wfds, NULL, &timeout);
- if (r == 0)
- errno = ETIME;
- if (r != 1)
- return -1;
-
- if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &eval, &esize) < 0)
- return -1;
- if (eval != 0) {
- errno = eval;
- return -1;
- }
-
- return 0;
-}
-
-static inline int poll_read(int fd, unsigned int timeout_sec)
-{
- struct timeval timeout = { .tv_sec = timeout_sec };
- fd_set rfds;
- int r;
-
- FD_ZERO(&rfds);
- FD_SET(fd, &rfds);
-
- r = select(fd + 1, &rfds, NULL, NULL, &timeout);
- if (r == 0)
- errno = ETIME;
-
- return r == 1 ? 0 : -1;
-}
-
-static inline int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len,
- unsigned int timeout_sec)
-{
- if (poll_read(fd, timeout_sec))
- return -1;
-
- return accept(fd, addr, len);
-}
-
-static inline int recv_timeout(int fd, void *buf, size_t len, int flags,
- unsigned int timeout_sec)
-{
- if (poll_read(fd, timeout_sec))
- return -1;
-
- return recv(fd, buf, len, flags);
-}
-
-static inline void init_addr_loopback4(struct sockaddr_storage *ss,
- socklen_t *len)
-{
- struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
-
- addr4->sin_family = AF_INET;
- addr4->sin_port = 0;
- addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- *len = sizeof(*addr4);
-}
-
-static inline void init_addr_loopback6(struct sockaddr_storage *ss,
- socklen_t *len)
-{
- struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss));
-
- addr6->sin6_family = AF_INET6;
- addr6->sin6_port = 0;
- addr6->sin6_addr = in6addr_loopback;
- *len = sizeof(*addr6);
-}
-
-static inline void init_addr_loopback_vsock(struct sockaddr_storage *ss,
- socklen_t *len)
-{
- struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss));
-
- addr->svm_family = AF_VSOCK;
- addr->svm_port = VMADDR_PORT_ANY;
- addr->svm_cid = VMADDR_CID_LOCAL;
- *len = sizeof(*addr);
-}
-
-static inline void init_addr_loopback(int family, struct sockaddr_storage *ss,
- socklen_t *len)
-{
- switch (family) {
- case AF_INET:
- init_addr_loopback4(ss, len);
- return;
- case AF_INET6:
- init_addr_loopback6(ss, len);
- return;
- case AF_VSOCK:
- init_addr_loopback_vsock(ss, len);
- return;
- default:
- FAIL("unsupported address family %d", family);
- }
-}
-
-static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss)
-{
- return (struct sockaddr *)ss;
-}
-
static inline int add_to_sockmap(int sock_mapfd, int fd1, int fd2)
{
u64 value;
@@ -334,136 +83,4 @@ static inline int add_to_sockmap(int sock_mapfd, int fd1, int fd2)
return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
}
-static inline int enable_reuseport(int s, int progfd)
-{
- int err, one = 1;
-
- err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
- if (err)
- return -1;
- err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd,
- sizeof(progfd));
- if (err)
- return -1;
-
- return 0;
-}
-
-static inline int socket_loopback_reuseport(int family, int sotype, int progfd)
-{
- struct sockaddr_storage addr;
- socklen_t len = 0;
- int err, s;
-
- init_addr_loopback(family, &addr, &len);
-
- s = xsocket(family, sotype, 0);
- if (s == -1)
- return -1;
-
- if (progfd >= 0)
- enable_reuseport(s, progfd);
-
- err = xbind(s, sockaddr(&addr), len);
- if (err)
- goto close;
-
- if (sotype & SOCK_DGRAM)
- return s;
-
- err = xlisten(s, SOMAXCONN);
- if (err)
- goto close;
-
- return s;
-close:
- xclose(s);
- return -1;
-}
-
-static inline int socket_loopback(int family, int sotype)
-{
- return socket_loopback_reuseport(family, sotype, -1);
-}
-
-static inline int create_pair(int family, int sotype, int *p0, int *p1)
-{
- __close_fd int s, c = -1, p = -1;
- struct sockaddr_storage addr;
- socklen_t len = sizeof(addr);
- int err;
-
- s = socket_loopback(family, sotype);
- if (s < 0)
- return s;
-
- err = xgetsockname(s, sockaddr(&addr), &len);
- if (err)
- return err;
-
- c = xsocket(family, sotype, 0);
- if (c < 0)
- return c;
-
- err = connect(c, sockaddr(&addr), len);
- if (err) {
- if (errno != EINPROGRESS) {
- FAIL_ERRNO("connect");
- return err;
- }
-
- err = poll_connect(c, IO_TIMEOUT_SEC);
- if (err) {
- FAIL_ERRNO("poll_connect");
- return err;
- }
- }
-
- switch (sotype & SOCK_TYPE_MASK) {
- case SOCK_DGRAM:
- err = xgetsockname(c, sockaddr(&addr), &len);
- if (err)
- return err;
-
- err = xconnect(s, sockaddr(&addr), len);
- if (err)
- return err;
-
- *p0 = take_fd(s);
- break;
- case SOCK_STREAM:
- case SOCK_SEQPACKET:
- p = xaccept_nonblock(s, NULL, NULL);
- if (p < 0)
- return p;
-
- *p0 = take_fd(p);
- break;
- default:
- FAIL("Unsupported socket type %#x", sotype);
- return -EOPNOTSUPP;
- }
-
- *p1 = take_fd(c);
- return 0;
-}
-
-static inline int create_socket_pairs(int family, int sotype, int *c0, int *c1,
- int *p0, int *p1)
-{
- int err;
-
- err = create_pair(family, sotype, c0, p0);
- if (err)
- return err;
-
- err = create_pair(family, sotype, c1, p1);
- if (err) {
- close(*c0);
- close(*p0);
- }
-
- return err;
-}
-
#endif // __SOCKMAP_HELPERS__
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_change_tail.c b/tools/testing/selftests/bpf/prog_tests/tc_change_tail.c
new file mode 100644
index 000000000000..74752233e779
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tc_change_tail.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <error.h>
+#include <test_progs.h>
+#include <linux/pkt_cls.h>
+
+#include "test_tc_change_tail.skel.h"
+#include "socket_helpers.h"
+
+#define LO_IFINDEX 1
+
+void test_tc_change_tail(void)
+{
+ LIBBPF_OPTS(bpf_tcx_opts, tcx_opts);
+ struct test_tc_change_tail *skel = NULL;
+ struct bpf_link *link;
+ int c1, p1;
+ char buf[2];
+ int ret;
+
+ skel = test_tc_change_tail__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tc_change_tail__open_and_load"))
+ return;
+
+ link = bpf_program__attach_tcx(skel->progs.change_tail, LO_IFINDEX,
+ &tcx_opts);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_tcx"))
+ goto destroy;
+
+ skel->links.change_tail = link;
+ ret = create_pair(AF_INET, SOCK_DGRAM, &c1, &p1);
+ if (!ASSERT_OK(ret, "create_pair"))
+ goto destroy;
+
+ ret = xsend(p1, "Tr", 2, 0);
+ ASSERT_EQ(ret, 2, "xsend(p1)");
+ ret = recv(c1, buf, 2, 0);
+ ASSERT_EQ(ret, 2, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");
+
+ ret = xsend(p1, "G", 1, 0);
+ ASSERT_EQ(ret, 1, "xsend(p1)");
+ ret = recv(c1, buf, 2, 0);
+ ASSERT_EQ(ret, 1, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, 0, "change_tail_ret");
+
+ ret = xsend(p1, "E", 1, 0);
+ ASSERT_EQ(ret, 1, "xsend(p1)");
+ ret = recv(c1, buf, 1, 0);
+ ASSERT_EQ(ret, 1, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret");
+
+ ret = xsend(p1, "Z", 1, 0);
+ ASSERT_EQ(ret, 1, "xsend(p1)");
+ ret = recv(c1, buf, 1, 0);
+ ASSERT_EQ(ret, 1, "recv(c1)");
+ ASSERT_EQ(skel->data->change_tail_ret, -EINVAL, "change_tail_ret");
+
+ close(c1);
+ close(p1);
+destroy:
+ test_tc_change_tail__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
new file mode 100644
index 000000000000..2796dd8545eb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_change_tail.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 ByteDance */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} sock_map_rx SEC(".maps");
+
+long change_tail_ret = 1;
+
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+ char *data, *data_end;
+
+ bpf_skb_pull_data(skb, 1);
+ data = (char *)(unsigned long)skb->data;
+ data_end = (char *)(unsigned long)skb->data_end;
+
+ if (data + 1 > data_end)
+ return SK_PASS;
+
+ if (data[0] == 'T') { /* Trim the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, skb->len - 1, 0);
+ return SK_PASS;
+ } else if (data[0] == 'G') { /* Grow the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, skb->len + 1, 0);
+ return SK_PASS;
+ } else if (data[0] == 'E') { /* Error */
+ change_tail_ret = bpf_skb_change_tail(skb, 65535, 0);
+ return SK_PASS;
+ }
+ return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_change_tail.c b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c
new file mode 100644
index 000000000000..28edafe803f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_change_tail.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/pkt_cls.h>
+
+long change_tail_ret = 1;
+
+static __always_inline struct iphdr *parse_ip_header(struct __sk_buff *skb, int *ip_proto)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct ethhdr *eth = data;
+ struct iphdr *iph;
+
+ /* Verify Ethernet header */
+ if ((void *)(data + sizeof(*eth)) > data_end)
+ return NULL;
+
+ /* Skip Ethernet header to get to IP header */
+ iph = (void *)(data + sizeof(struct ethhdr));
+
+ /* Verify IP header */
+ if ((void *)(data + sizeof(struct ethhdr) + sizeof(*iph)) > data_end)
+ return NULL;
+
+ /* Basic IP header validation */
+ if (iph->version != 4) /* Only support IPv4 */
+ return NULL;
+
+ if (iph->ihl < 5) /* Minimum IP header length */
+ return NULL;
+
+ *ip_proto = iph->protocol;
+ return iph;
+}
+
+static __always_inline struct udphdr *parse_udp_header(struct __sk_buff *skb, struct iphdr *iph)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *hdr = (void *)iph;
+ struct udphdr *udp;
+
+ /* Calculate UDP header position */
+ udp = hdr + (iph->ihl * 4);
+ hdr = (void *)udp;
+
+ /* Verify UDP header bounds */
+ if ((void *)(hdr + sizeof(*udp)) > data_end)
+ return NULL;
+
+ return udp;
+}
+
+SEC("tc/ingress")
+int change_tail(struct __sk_buff *skb)
+{
+ int len = skb->len;
+ struct udphdr *udp;
+ struct iphdr *iph;
+ void *data_end;
+ char *payload;
+ int ip_proto;
+
+ bpf_skb_pull_data(skb, len);
+
+ data_end = (void *)(long)skb->data_end;
+ iph = parse_ip_header(skb, &ip_proto);
+ if (!iph)
+ return TCX_PASS;
+
+ if (ip_proto != IPPROTO_UDP)
+ return TCX_PASS;
+
+ udp = parse_udp_header(skb, iph);
+ if (!udp)
+ return TCX_PASS;
+
+ payload = (char *)udp + (sizeof(struct udphdr));
+ if (payload + 1 > (char *)data_end)
+ return TCX_PASS;
+
+ if (payload[0] == 'T') { /* Trim the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, len - 1, 0);
+ if (!change_tail_ret)
+ bpf_skb_change_tail(skb, len, 0);
+ return TCX_PASS;
+ } else if (payload[0] == 'G') { /* Grow the packet */
+ change_tail_ret = bpf_skb_change_tail(skb, len + 1, 0);
+ if (!change_tail_ret)
+ bpf_skb_change_tail(skb, len, 0);
+ return TCX_PASS;
+ } else if (payload[0] == 'E') { /* Error */
+ change_tail_ret = bpf_skb_change_tail(skb, 65535, 0);
+ return TCX_PASS;
+ } else if (payload[0] == 'Z') { /* Zero */
+ change_tail_ret = bpf_skb_change_tail(skb, 0, 0);
+ return TCX_PASS;
+ }
+ return TCX_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/sdt.h b/tools/testing/selftests/bpf/sdt.h
index ca0162b4dc57..1fcfa5160231 100644
--- a/tools/testing/selftests/bpf/sdt.h
+++ b/tools/testing/selftests/bpf/sdt.h
@@ -102,6 +102,8 @@
# define STAP_SDT_ARG_CONSTRAINT nZr
# elif defined __arm__
# define STAP_SDT_ARG_CONSTRAINT g
+# elif defined __loongarch__
+# define STAP_SDT_ARG_CONSTRAINT nmr
# else
# define STAP_SDT_ARG_CONSTRAINT nor
# endif
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 2d742fdac6b9..81943c6254e6 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -293,6 +293,10 @@ static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *st
return 0;
}
#else
+# ifndef PROCMAP_QUERY_VMA_EXECUTABLE
+# define PROCMAP_QUERY_VMA_EXECUTABLE 0x04
+# endif
+
static int procmap_query(int fd, const void *addr, __u32 query_flags, size_t *start, size_t *offset, int *flags)
{
return -EOPNOTSUPP;
diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py
index 30f29096e27c..38303da957ee 100755
--- a/tools/testing/selftests/drivers/net/queues.py
+++ b/tools/testing/selftests/drivers/net/queues.py
@@ -1,32 +1,37 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
-from lib.py import ksft_run, ksft_exit, ksft_eq, KsftSkipEx
-from lib.py import EthtoolFamily, NetdevFamily
+from lib.py import ksft_disruptive, ksft_exit, ksft_run
+from lib.py import ksft_eq, ksft_raises, KsftSkipEx
+from lib.py import EthtoolFamily, NetdevFamily, NlError
from lib.py import NetDrvEnv
-from lib.py import cmd
+from lib.py import cmd, defer, ip
+import errno
import glob
-def sys_get_queues(ifname) -> int:
- folders = glob.glob(f'/sys/class/net/{ifname}/queues/rx-*')
+def sys_get_queues(ifname, qtype='rx') -> int:
+ folders = glob.glob(f'/sys/class/net/{ifname}/queues/{qtype}-*')
return len(folders)
-def nl_get_queues(cfg, nl):
+def nl_get_queues(cfg, nl, qtype='rx'):
queues = nl.queue_get({'ifindex': cfg.ifindex}, dump=True)
if queues:
- return len([q for q in queues if q['type'] == 'rx'])
+ return len([q for q in queues if q['type'] == qtype])
return None
def get_queues(cfg, nl) -> None:
- queues = nl_get_queues(cfg, nl)
- if not queues:
- raise KsftSkipEx('queue-get not supported by device')
+ snl = NetdevFamily(recv_size=4096)
- expected = sys_get_queues(cfg.dev['ifname'])
- ksft_eq(queues, expected)
+ for qtype in ['rx', 'tx']:
+ queues = nl_get_queues(cfg, snl, qtype)
+ if not queues:
+ raise KsftSkipEx('queue-get not supported by device')
+
+ expected = sys_get_queues(cfg.dev['ifname'], qtype)
+ ksft_eq(queues, expected)
def addremove_queues(cfg, nl) -> None:
@@ -56,9 +61,27 @@ def addremove_queues(cfg, nl) -> None:
ksft_eq(queues, expected)
+@ksft_disruptive
+def check_down(cfg, nl) -> None:
+ # Check the NAPI IDs before interface goes down and hides them
+ napis = nl.napi_get({'ifindex': cfg.ifindex}, dump=True)
+
+ ip(f"link set dev {cfg.dev['ifname']} down")
+ defer(ip, f"link set dev {cfg.dev['ifname']} up")
+
+ with ksft_raises(NlError) as cm:
+ nl.queue_get({'ifindex': cfg.ifindex, 'id': 0, 'type': 'rx'})
+ ksft_eq(cm.exception.nl_msg.error, -errno.ENOENT)
+
+ if napis:
+ with ksft_raises(NlError) as cm:
+ nl.napi_get({'id': napis[0]['id']})
+ ksft_eq(cm.exception.nl_msg.error, -errno.ENOENT)
+
+
def main() -> None:
- with NetDrvEnv(__file__, queue_count=3) as cfg:
- ksft_run([get_queues, addremove_queues], args=(cfg, NetdevFamily()))
+ with NetDrvEnv(__file__, queue_count=100) as cfg:
+ ksft_run([get_queues, addremove_queues, check_down], args=(cfg, NetdevFamily()))
ksft_exit()
diff --git a/tools/testing/selftests/drivers/net/stats.py b/tools/testing/selftests/drivers/net/stats.py
index 63e3c045a3b2..031ac9def6c0 100755
--- a/tools/testing/selftests/drivers/net/stats.py
+++ b/tools/testing/selftests/drivers/net/stats.py
@@ -110,6 +110,23 @@ def qstat_by_ifindex(cfg) -> None:
ksft_ge(triple[1][key], triple[0][key], comment="bad key: " + key)
ksft_ge(triple[2][key], triple[1][key], comment="bad key: " + key)
+ # Sanity check the dumps
+ queues = NetdevFamily(recv_size=4096).qstats_get({"scope": "queue"}, dump=True)
+ # Reformat the output into {ifindex: {rx: [id, id, ...], tx: [id, id, ...]}}
+ parsed = {}
+ for entry in queues:
+ ifindex = entry["ifindex"]
+ if ifindex not in parsed:
+ parsed[ifindex] = {"rx":[], "tx": []}
+ parsed[ifindex][entry["queue-type"]].append(entry['queue-id'])
+ # Now, validate
+ for ifindex, queues in parsed.items():
+ for qtype in ['rx', 'tx']:
+ ksft_eq(len(queues[qtype]), len(set(queues[qtype])),
+ comment="repeated queue keys")
+ ksft_eq(len(queues[qtype]), max(queues[qtype]) + 1,
+ comment="missing queue keys")
+
# Test invalid dumps
# 0 is invalid
with ksft_raises(NlError) as cm:
@@ -158,7 +175,7 @@ def check_down(cfg) -> None:
def main() -> None:
- with NetDrvEnv(__file__) as cfg:
+ with NetDrvEnv(__file__, queue_count=100) as cfg:
ksft_run([check_pause, check_fec, pkt_byte_sum, qstat_by_ifindex,
check_down],
args=(cfg, ))
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 95af2d78fd31..c0c53451a16d 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -9,6 +9,7 @@
#include <fcntl.h>
#include <linux/memfd.h>
#include <sched.h>
+#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
@@ -281,6 +282,24 @@ static void *mfd_assert_mmap_shared(int fd)
return p;
}
+static void *mfd_assert_mmap_read_shared(int fd)
+{
+ void *p;
+
+ p = mmap(NULL,
+ mfd_def_size,
+ PROT_READ,
+ MAP_SHARED,
+ fd,
+ 0);
+ if (p == MAP_FAILED) {
+ printf("mmap() failed: %m\n");
+ abort();
+ }
+
+ return p;
+}
+
static void *mfd_assert_mmap_private(int fd)
{
void *p;
@@ -979,6 +998,30 @@ static void test_seal_future_write(void)
close(fd);
}
+static void test_seal_write_map_read_shared(void)
+{
+ int fd;
+ void *p;
+
+ printf("%s SEAL-WRITE-MAP-READ\n", memfd_str);
+
+ fd = mfd_assert_new("kern_memfd_seal_write_map_read",
+ mfd_def_size,
+ MFD_CLOEXEC | MFD_ALLOW_SEALING);
+
+ mfd_assert_add_seals(fd, F_SEAL_WRITE);
+ mfd_assert_has_seals(fd, F_SEAL_WRITE);
+
+ p = mfd_assert_mmap_read_shared(fd);
+
+ mfd_assert_read(fd);
+ mfd_assert_read_shared(fd);
+ mfd_fail_write(fd);
+
+ munmap(p, mfd_def_size);
+ close(fd);
+}
+
/*
* Test SEAL_SHRINK
* Test whether SEAL_SHRINK actually prevents shrinking
@@ -1557,6 +1600,11 @@ static void test_share_fork(char *banner, char *b_suffix)
close(fd);
}
+static bool pid_ns_supported(void)
+{
+ return access("/proc/self/ns/pid", F_OK) == 0;
+}
+
int main(int argc, char **argv)
{
pid_t pid;
@@ -1587,12 +1635,17 @@ int main(int argc, char **argv)
test_seal_write();
test_seal_future_write();
+ test_seal_write_map_read_shared();
test_seal_shrink();
test_seal_grow();
test_seal_resize();
- test_sysctl_simple();
- test_sysctl_nested();
+ if (pid_ns_supported()) {
+ test_sysctl_simple();
+ test_sysctl_nested();
+ } else {
+ printf("PID namespaces are not supported; skipping sysctl tests\n");
+ }
test_share_dup("SHARE-DUP", "");
test_share_mmap("SHARE-MMAP", "");
diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh
index c35548767756..ecd34f364125 100755
--- a/tools/testing/selftests/net/forwarding/local_termination.sh
+++ b/tools/testing/selftests/net/forwarding/local_termination.sh
@@ -7,7 +7,6 @@ ALL_TESTS="standalone vlan_unaware_bridge vlan_aware_bridge test_vlan \
NUM_NETIFS=2
PING_COUNT=1
REQUIRE_MTOOLS=yes
-REQUIRE_MZ=no
source lib.sh
diff --git a/tools/testing/selftests/net/lib/py/ynl.py b/tools/testing/selftests/net/lib/py/ynl.py
index a0d689d58c57..076a7e8dc3eb 100644
--- a/tools/testing/selftests/net/lib/py/ynl.py
+++ b/tools/testing/selftests/net/lib/py/ynl.py
@@ -32,23 +32,23 @@ except ModuleNotFoundError as e:
# Set schema='' to avoid jsonschema validation, it's slow
#
class EthtoolFamily(YnlFamily):
- def __init__(self):
+ def __init__(self, recv_size=0):
super().__init__((SPEC_PATH / Path('ethtool.yaml')).as_posix(),
- schema='')
+ schema='', recv_size=recv_size)
class RtnlFamily(YnlFamily):
- def __init__(self):
+ def __init__(self, recv_size=0):
super().__init__((SPEC_PATH / Path('rt_link.yaml')).as_posix(),
- schema='')
+ schema='', recv_size=recv_size)
class NetdevFamily(YnlFamily):
- def __init__(self):
+ def __init__(self, recv_size=0):
super().__init__((SPEC_PATH / Path('netdev.yaml')).as_posix(),
- schema='')
+ schema='', recv_size=recv_size)
class NetshaperFamily(YnlFamily):
- def __init__(self):
+ def __init__(self, recv_size=0):
super().__init__((SPEC_PATH / Path('net_shaper.yaml')).as_posix(),
- schema='')
+ schema='', recv_size=recv_size)
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index cc0bfae2bafa..960e1ab4dd04 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -171,8 +171,10 @@ ovs_add_netns_and_veths () {
ovs_add_if "$1" "$2" "$4" -u || return 1
fi
- [ $TRACING -eq 1 ] && ovs_netns_spawn_daemon "$1" "$ns" \
- tcpdump -i any -s 65535
+ if [ $TRACING -eq 1 ]; then
+ ovs_netns_spawn_daemon "$1" "$3" tcpdump -l -i any -s 6553
+ ovs_wait grep -q "listening on any" ${ovs_dir}/stderr
+ fi
return 0
}
diff --git a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
index 37d9bf6fb745..6f4c3f5a1c5d 100644
--- a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
@@ -20,7 +20,7 @@ s32 BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_select_cpu, struct task_struct *p,
* If we dispatch to a bogus DSQ that will fall back to the
* builtin global DSQ, we fail gracefully.
*/
- scx_bpf_dispatch_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
+ scx_bpf_dsq_insert_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
p->scx.dsq_vtime, 0);
return cpu;
}
diff --git a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
index dffc97d9cdf1..e4a55027778f 100644
--- a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
@@ -17,8 +17,8 @@ s32 BPF_STRUCT_OPS(ddsp_vtimelocal_fail_select_cpu, struct task_struct *p,
if (cpu >= 0) {
/* Shouldn't be allowed to vtime dispatch to a builtin DSQ. */
- scx_bpf_dispatch_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
- p->scx.dsq_vtime, 0);
+ scx_bpf_dsq_insert_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
+ p->scx.dsq_vtime, 0);
return cpu;
}
diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
index 6a7db1502c29..fbda6bf54671 100644
--- a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
@@ -43,9 +43,12 @@ void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev)
if (!p)
return;
- target = bpf_get_prandom_u32() % nr_cpus;
+ if (p->nr_cpus_allowed == nr_cpus)
+ target = bpf_get_prandom_u32() % nr_cpus;
+ else
+ target = scx_bpf_task_cpu(p);
- scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
+ scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
bpf_task_release(p);
}
diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.c b/tools/testing/selftests/sched_ext/dsp_local_on.c
index 472851b56854..0ff27e57fe43 100644
--- a/tools/testing/selftests/sched_ext/dsp_local_on.c
+++ b/tools/testing/selftests/sched_ext/dsp_local_on.c
@@ -34,9 +34,10 @@ static enum scx_test_status run(void *ctx)
/* Just sleeping is fine, plenty of scheduling events happening */
sleep(1);
- SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR));
bpf_link__destroy(link);
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_UNREG));
+
return SCX_TEST_PASS;
}
@@ -50,7 +51,7 @@ static void cleanup(void *ctx)
struct scx_test dsp_local_on = {
.name = "dsp_local_on",
.description = "Verify we can directly dispatch tasks to a local DSQs "
- "from osp.dispatch()",
+ "from ops.dispatch()",
.setup = setup,
.run = run,
.cleanup = cleanup,
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
index 1efb50d61040..a7cf868d5e31 100644
--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
@@ -31,7 +31,7 @@ void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
/* Can only call from ops.select_cpu() */
scx_bpf_select_cpu_dfl(p, 0, 0, &found);
- scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}
SEC(".struct_ops.link")
diff --git a/tools/testing/selftests/sched_ext/exit.bpf.c b/tools/testing/selftests/sched_ext/exit.bpf.c
index d75d4faf07f6..4bc36182d3ff 100644
--- a/tools/testing/selftests/sched_ext/exit.bpf.c
+++ b/tools/testing/selftests/sched_ext/exit.bpf.c
@@ -33,7 +33,7 @@ void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
if (exit_point == EXIT_ENQUEUE)
EXIT_CLEANLY();
- scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
+ scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
}
void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
@@ -41,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
if (exit_point == EXIT_DISPATCH)
EXIT_CLEANLY();
- scx_bpf_consume(DSQ_ID);
+ scx_bpf_dsq_move_to_local(DSQ_ID);
}
void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
index 4d4cd8d966db..430f5e13bf55 100644
--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -12,6 +12,8 @@
char _license[] SEC("license") = "GPL";
+#define DSQ_ID 0
+
s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
u64 wake_flags)
{
@@ -20,7 +22,7 @@ s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags)
{
- scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
}
void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
@@ -28,7 +30,7 @@ void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
{
- scx_bpf_consume(SCX_DSQ_GLOBAL);
+ scx_bpf_dsq_move_to_local(DSQ_ID);
}
void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
@@ -123,7 +125,7 @@ void BPF_STRUCT_OPS(maximal_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
s32 BPF_STRUCT_OPS_SLEEPABLE(maximal_init)
{
- return 0;
+ return scx_bpf_create_dsq(DSQ_ID, -1);
}
void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info)
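The sched_ext selftest conversions in this series swap the old scx_bpf_dispatch()/scx_bpf_dispatch_vtime()/scx_bpf_consume() kfuncs for scx_bpf_dsq_insert()/scx_bpf_dsq_insert_vtime()/scx_bpf_dsq_move_to_local(). A condensed sketch of the pattern the updated maximal.bpf.c follows (illustrative only, not part of the patch; assumes the <scx/common.bpf.h> header these selftests build against): create a custom DSQ at init, insert into it from ops.enqueue(), and move tasks from it to the CPU-local DSQ in ops.dispatch().

#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

#define DEMO_DSQ_ID 0

s32 BPF_STRUCT_OPS_SLEEPABLE(demo_init)
{
	/* -1: no NUMA node preference, as in maximal_init() above. */
	return scx_bpf_create_dsq(DEMO_DSQ_ID, -1);
}

void BPF_STRUCT_OPS(demo_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* Replaces the old scx_bpf_dispatch(); the vtime variant is
	 * scx_bpf_dsq_insert_vtime(p, dsq, slice, vtime, flags). */
	scx_bpf_dsq_insert(p, DEMO_DSQ_ID, SCX_SLICE_DFL, enq_flags);
}

void BPF_STRUCT_OPS(demo_dispatch, s32 cpu, struct task_struct *prev)
{
	/* Replaces the old scx_bpf_consume(). */
	scx_bpf_dsq_move_to_local(DEMO_DSQ_ID);
}

SEC(".struct_ops.link")
struct sched_ext_ops demo_ops = {
	.init		= (void *)demo_init,
	.enqueue	= (void *)demo_enqueue,
	.dispatch	= (void *)demo_dispatch,
	.name		= "demo",
};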
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
index f171ac470970..13d0f5be788d 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
@@ -30,7 +30,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
}
scx_bpf_put_idle_cpumask(idle_mask);
- scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}
SEC(".struct_ops.link")
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
index 9efdbb7da928..815f1d5d61ac 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
@@ -67,7 +67,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_enqueue, struct task_struct *p,
saw_local = true;
}
- scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags);
+ scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, enq_flags);
}
s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
index 59bfc4f36167..4bb99699e920 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
@@ -29,7 +29,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_select_cpu, struct task_struct *p,
cpu = prev_cpu;
dispatch:
- scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, 0);
+ scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, 0);
return cpu;
}
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
index 3bbd5fcdfb18..2a75de11b2cf 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
@@ -18,7 +18,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_select_cpu, struct task_struct *p
s32 prev_cpu, u64 wake_flags)
{
/* Dispatching to a random DSQ should fail. */
- scx_bpf_dispatch(p, 0xcafef00d, SCX_SLICE_DFL, 0);
+ scx_bpf_dsq_insert(p, 0xcafef00d, SCX_SLICE_DFL, 0);
return prev_cpu;
}
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
index 0fda57fe0ecf..99d075695c97 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
@@ -18,8 +18,8 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_select_cpu, struct task_struct *p
s32 prev_cpu, u64 wake_flags)
{
/* Dispatching twice in a row is disallowed. */
- scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
- scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+ scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+ scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
return prev_cpu;
}
diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
index e6c67bcf5e6e..bfcb96cd4954 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
@@ -2,8 +2,8 @@
/*
* A scheduler that validates that enqueue flags are properly stored and
* applied at dispatch time when a task is directly dispatched from
- * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and
- * making the test a very basic vtime scheduler.
+ * ops.select_cpu(). We validate this by using scx_bpf_dsq_insert_vtime(),
+ * and making the test a very basic vtime scheduler.
*
* Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
* Copyright (c) 2024 David Vernet <dvernet@meta.com>
@@ -47,13 +47,13 @@ s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,
cpu = prev_cpu;
scx_bpf_test_and_clear_cpu_idle(cpu);
ddsp:
- scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
+ scx_bpf_dsq_insert_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
return cpu;
}
void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
{
- if (scx_bpf_consume(VTIME_DSQ))
+ if (scx_bpf_dsq_move_to_local(VTIME_DSQ))
consumed = true;
}
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
index 8b66387e5f35..4403cc4eba30 100644
--- a/tools/tracing/rtla/src/timerlat_hist.c
+++ b/tools/tracing/rtla/src/timerlat_hist.c
@@ -282,6 +282,21 @@ static void timerlat_hist_header(struct osnoise_tool *tool)
}
/*
+ * format_summary_value - format a line of summary value (min, max or avg)
+ * of hist data
+ */
+static void format_summary_value(struct trace_seq *seq,
+ int count,
+ unsigned long long val,
+ bool avg)
+{
+ if (count)
+ trace_seq_printf(seq, "%9llu ", avg ? val / count : val);
+ else
+ trace_seq_printf(seq, "%9c ", '-');
+}
+
+/*
* timerlat_print_summary - print the summary of the hist data to the output
*/
static void
@@ -328,29 +343,23 @@ timerlat_print_summary(struct timerlat_hist_params *params,
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
- if (!params->no_irq) {
- if (data->hist[cpu].irq_count)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].min_irq);
- else
- trace_seq_printf(trace->seq, " - ");
- }
+ if (!params->no_irq)
+ format_summary_value(trace->seq,
+ data->hist[cpu].irq_count,
+ data->hist[cpu].min_irq,
+ false);
- if (!params->no_thread) {
- if (data->hist[cpu].thread_count)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].min_thread);
- else
- trace_seq_printf(trace->seq, " - ");
- }
+ if (!params->no_thread)
+ format_summary_value(trace->seq,
+ data->hist[cpu].thread_count,
+ data->hist[cpu].min_thread,
+ false);
- if (params->user_hist) {
- if (data->hist[cpu].user_count)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].min_user);
- else
- trace_seq_printf(trace->seq, " - ");
- }
+ if (params->user_hist)
+ format_summary_value(trace->seq,
+ data->hist[cpu].user_count,
+ data->hist[cpu].min_user,
+ false);
}
trace_seq_printf(trace->seq, "\n");
@@ -364,29 +373,23 @@ timerlat_print_summary(struct timerlat_hist_params *params,
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
- if (!params->no_irq) {
- if (data->hist[cpu].irq_count)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].sum_irq / data->hist[cpu].irq_count);
- else
- trace_seq_printf(trace->seq, " - ");
- }
+ if (!params->no_irq)
+ format_summary_value(trace->seq,
+ data->hist[cpu].irq_count,
+ data->hist[cpu].sum_irq,
+ true);
- if (!params->no_thread) {
- if (data->hist[cpu].thread_count)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].sum_thread / data->hist[cpu].thread_count);
- else
- trace_seq_printf(trace->seq, " - ");
- }
+ if (!params->no_thread)
+ format_summary_value(trace->seq,
+ data->hist[cpu].thread_count,
+ data->hist[cpu].sum_thread,
+ true);
- if (params->user_hist) {
- if (data->hist[cpu].user_count)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].sum_user / data->hist[cpu].user_count);
- else
- trace_seq_printf(trace->seq, " - ");
- }
+ if (params->user_hist)
+ format_summary_value(trace->seq,
+ data->hist[cpu].user_count,
+ data->hist[cpu].sum_user,
+ true);
}
trace_seq_printf(trace->seq, "\n");
@@ -400,29 +403,23 @@ timerlat_print_summary(struct timerlat_hist_params *params,
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
- if (!params->no_irq) {
- if (data->hist[cpu].irq_count)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].max_irq);
- else
- trace_seq_printf(trace->seq, " - ");
- }
+ if (!params->no_irq)
+ format_summary_value(trace->seq,
+ data->hist[cpu].irq_count,
+ data->hist[cpu].max_irq,
+ false);
- if (!params->no_thread) {
- if (data->hist[cpu].thread_count)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].max_thread);
- else
- trace_seq_printf(trace->seq, " - ");
- }
+ if (!params->no_thread)
+ format_summary_value(trace->seq,
+ data->hist[cpu].thread_count,
+ data->hist[cpu].max_thread,
+ false);
- if (params->user_hist) {
- if (data->hist[cpu].user_count)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].max_user);
- else
- trace_seq_printf(trace->seq, " - ");
- }
+ if (params->user_hist)
+ format_summary_value(trace->seq,
+ data->hist[cpu].user_count,
+ data->hist[cpu].max_user,
+ false);
}
trace_seq_printf(trace->seq, "\n");
trace_seq_do_printf(trace->seq);
@@ -506,16 +503,22 @@ timerlat_print_stats_all(struct timerlat_hist_params *params,
trace_seq_printf(trace->seq, "min: ");
if (!params->no_irq)
- trace_seq_printf(trace->seq, "%9llu ",
- sum.min_irq);
+ format_summary_value(trace->seq,
+ sum.irq_count,
+ sum.min_irq,
+ false);
if (!params->no_thread)
- trace_seq_printf(trace->seq, "%9llu ",
- sum.min_thread);
+ format_summary_value(trace->seq,
+ sum.thread_count,
+ sum.min_thread,
+ false);
if (params->user_hist)
- trace_seq_printf(trace->seq, "%9llu ",
- sum.min_user);
+ format_summary_value(trace->seq,
+ sum.user_count,
+ sum.min_user,
+ false);
trace_seq_printf(trace->seq, "\n");
@@ -523,16 +526,22 @@ timerlat_print_stats_all(struct timerlat_hist_params *params,
trace_seq_printf(trace->seq, "avg: ");
if (!params->no_irq)
- trace_seq_printf(trace->seq, "%9llu ",
- sum.sum_irq / sum.irq_count);
+ format_summary_value(trace->seq,
+ sum.irq_count,
+ sum.sum_irq,
+ true);
if (!params->no_thread)
- trace_seq_printf(trace->seq, "%9llu ",
- sum.sum_thread / sum.thread_count);
+ format_summary_value(trace->seq,
+ sum.thread_count,
+ sum.sum_thread,
+ true);
if (params->user_hist)
- trace_seq_printf(trace->seq, "%9llu ",
- sum.sum_user / sum.user_count);
+ format_summary_value(trace->seq,
+ sum.user_count,
+ sum.sum_user,
+ true);
trace_seq_printf(trace->seq, "\n");
@@ -540,16 +549,22 @@ timerlat_print_stats_all(struct timerlat_hist_params *params,
trace_seq_printf(trace->seq, "max: ");
if (!params->no_irq)
- trace_seq_printf(trace->seq, "%9llu ",
- sum.max_irq);
+ format_summary_value(trace->seq,
+ sum.irq_count,
+ sum.max_irq,
+ false);
if (!params->no_thread)
- trace_seq_printf(trace->seq, "%9llu ",
- sum.max_thread);
+ format_summary_value(trace->seq,
+ sum.thread_count,
+ sum.max_thread,
+ false);
if (params->user_hist)
- trace_seq_printf(trace->seq, "%9llu ",
- sum.max_user);
+ format_summary_value(trace->seq,
+ sum.user_count,
+ sum.max_user,
+ false);
trace_seq_printf(trace->seq, "\n");
trace_seq_do_printf(trace->seq);
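The rtla hunks above fold the repeated count-guarded trace_seq_printf() calls into one helper that prints a '-' placeholder when no samples were recorded, the running average (val / count) when avg is set, and the raw value otherwise. A standalone sketch of the same logic, with plain printf() standing in for trace_seq_printf() (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Print one 9-wide summary column: "-" if there were no samples,
 * val / count if avg is requested, otherwise val itself. */
static void format_summary_value(int count, unsigned long long val, bool avg)
{
	if (count)
		printf("%9llu ", avg ? val / count : val);
	else
		printf("%9c ", '-');
}

int main(void)
{
	format_summary_value(0, 0, false);	/* no samples -> "        -" */
	format_summary_value(4, 100, true);	/* sum of 100 over 4 -> "       25" */
	format_summary_value(4, 37, false);	/* plain min/max value -> "       37" */
	printf("\n");
	return 0;
}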
diff --git a/usr/include/Makefile b/usr/include/Makefile
index 771e32872b2a..6c6de1b1622b 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -78,7 +78,7 @@ quiet_cmd_hdrtest = HDRTEST $<
cmd_hdrtest = \
$(CC) $(c_flags) -fsyntax-only -x c /dev/null \
$(if $(filter-out $(no-header-test), $*.h), -include $< -include $<); \
- $(PERL) $(src)/headers_check.pl $(obj) $(SRCARCH) $<; \
+ $(PERL) $(src)/headers_check.pl $(obj) $<; \
touch $@
$(obj)/%.hdrtest: $(obj)/%.h FORCE
diff --git a/usr/include/headers_check.pl b/usr/include/headers_check.pl
index b6aec5e4365f..2b70bfa5558e 100755
--- a/usr/include/headers_check.pl
+++ b/usr/include/headers_check.pl
@@ -3,9 +3,8 @@
#
# headers_check.pl execute a number of trivial consistency checks
#
-# Usage: headers_check.pl dir arch [files...]
+# Usage: headers_check.pl dir [files...]
# dir: dir to look for included files
-# arch: architecture
# files: list of files to check
#
# The script reads the supplied files line by line and:
@@ -23,7 +22,7 @@ use warnings;
use strict;
use File::Basename;
-my ($dir, $arch, @files) = @ARGV;
+my ($dir, @files) = @ARGV;
my $ret = 0;
my $line;
@@ -55,10 +54,6 @@ sub check_include
my $found;
$found = stat($dir . "/" . $inc);
if (!$found) {
- $inc =~ s#asm/#asm-$arch/#;
- $found = stat($dir . "/" . $inc);
- }
- if (!$found) {
printf STDERR "$filename:$lineno: included file '$inc' is not exported\n";
$ret = 1;
}