Diffstat (limited to 'drivers')
-rw-r--r--drivers/accel/amdxdna/aie2_psp.c4
-rw-r--r--drivers/acpi/acpi_pad.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c7
-rw-r--r--drivers/acpi/apei/einj-core.c9
-rw-r--r--drivers/acpi/cppc_acpi.c2
-rw-r--r--drivers/acpi/ec.c17
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/acpi/processor_driver.c3
-rw-r--r--drivers/acpi/processor_idle.c8
-rw-r--r--drivers/acpi/resource.c7
-rw-r--r--drivers/ata/ahci.c39
-rw-r--r--drivers/ata/libata-acpi.c24
-rw-r--r--drivers/ata/pata_cs5536.c2
-rw-r--r--drivers/ata/pata_macio.c2
-rw-r--r--drivers/ata/pata_via.c9
-rw-r--r--drivers/atm/atmtcp.c4
-rw-r--r--drivers/atm/idt77252.c5
-rw-r--r--drivers/base/faux.c3
-rw-r--r--drivers/block/aoe/aoe.h1
-rw-r--r--drivers/block/aoe/aoecmd.c8
-rw-r--r--drivers/block/aoe/aoedev.c13
-rw-r--r--drivers/block/loop.c11
-rw-r--r--drivers/block/ublk_drv.c50
-rw-r--r--drivers/bluetooth/btintel_pcie.c64
-rw-r--r--drivers/bluetooth/btintel_pcie.h10
-rw-r--r--drivers/bluetooth/hci_qca.c13
-rw-r--r--drivers/cpufreq/rcpufreq_dt.rs4
-rw-r--r--drivers/cxl/core/edac.c18
-rw-r--r--drivers/cxl/core/features.c2
-rw-r--r--drivers/cxl/core/ras.c47
-rw-r--r--drivers/dma-buf/dma-buf.c2
-rw-r--r--drivers/dma-buf/udmabuf.c5
-rw-r--r--drivers/edac/amd64_edac.c58
-rw-r--r--drivers/edac/igen6_edac.c24
-rw-r--r--drivers/gpio/gpio-loongson-64bit.c2
-rw-r--r--drivers/gpio/gpio-mlxbf3.c54
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-spacemit-k1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c55
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c12
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c1
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c69
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c7
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/drm_writeback.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c4
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c4
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c6
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c55
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ddc.c1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpummu.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c18
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c39
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c54
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c14
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c7
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c7
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c32
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c10
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h23
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c20
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h9
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c12
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h2
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml3
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c2
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig1
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c12
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c2
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c11
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c5
-rw-r--r--drivers/gpu/drm/xe/regs/xe_mchbar_regs.h1
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c11
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c8
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c17
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h5
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c3
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c34
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c24
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c2
-rw-r--r--drivers/hid/hid-appletb-kbd.c5
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-lenovo.c19
-rw-r--r--drivers/hid/hid-multitouch.c8
-rw-r--r--drivers/hid/hid-nintendo.c38
-rw-r--r--drivers/hid/hid-quirks.c3
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c12
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c26
-rw-r--r--drivers/hid/wacom_sys.c7
-rw-r--r--drivers/hwmon/ftsteutates.c9
-rw-r--r--drivers/hwmon/ltc4282.c7
-rw-r--r--drivers/hwmon/occ/common.c240
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c4
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c4
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c4
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c2
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c8
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c4
-rw-r--r--drivers/i2c/busses/i2c-axxia.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c2
-rw-r--r--drivers/i2c/busses/i2c-cadence.c10
-rw-r--r--drivers/i2c/busses/i2c-cgbc.c4
-rw-r--r--drivers/i2c/busses/i2c-designware-amdisp.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c5
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c2
-rw-r--r--drivers/i2c/busses/i2c-emev2.c6
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c6
-rw-r--r--drivers/i2c/busses/i2c-gxp.c6
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c2
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c8
-rw-r--r--drivers/i2c/busses/i2c-imx.c11
-rw-r--r--drivers/i2c/busses/i2c-k1.c2
-rw-r--r--drivers/i2c/busses/i2c-keba.c2
-rw-r--r--drivers/i2c/busses/i2c-mchp-pci1xxxx.c2
-rw-r--r--drivers/i2c/busses/i2c-meson.c4
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c2
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c2
-rw-r--r--drivers/i2c/busses/i2c-mxs.c2
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c4
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c6
-rw-r--r--drivers/i2c/busses/i2c-omap.c13
-rw-r--r--drivers/i2c/busses/i2c-pnx.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c16
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c4
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c4
-rw-r--r--drivers/i2c/busses/i2c-qup.c8
-rw-r--r--drivers/i2c/busses/i2c-rcar.c10
-rw-r--r--drivers/i2c/busses/i2c-robotfuzz-osif.c6
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c6
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c4
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c4
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c4
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c4
-rw-r--r--drivers/i2c/busses/i2c-tegra.c6
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c6
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c2
-rw-r--r--drivers/i2c/i2c-atr.c2
-rw-r--r--drivers/i2c/i2c-mux.c6
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c4
-rw-r--r--drivers/idle/intel_idle.c12
-rw-r--r--drivers/infiniband/core/cache.c4
-rw-r--r--drivers/infiniband/core/umem_odp.c11
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c4
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c10
-rw-r--r--drivers/infiniband/hw/mlx5/main.c33
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c61
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c8
-rw-r--r--drivers/iommu/tegra-smmu.c4
-rw-r--r--drivers/irqchip/irq-ath79-misc.c20
-rw-r--r--drivers/md/bcache/Kconfig1
-rw-r--r--drivers/md/bcache/alloc.c57
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/bset.c116
-rw-r--r--drivers/md/bcache/bset.h40
-rw-r--r--drivers/md/bcache/btree.c69
-rw-r--r--drivers/md/bcache/extents.c45
-rw-r--r--drivers/md/bcache/movinggc.c33
-rw-r--r--drivers/md/bcache/super.c3
-rw-r--r--drivers/md/bcache/sysfs.c4
-rw-r--r--drivers/md/bcache/util.h67
-rw-r--r--drivers/md/bcache/writeback.c13
-rw-r--r--drivers/md/dm-crypt.c11
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/mfd/88pm860x-core.c3
-rw-r--r--drivers/mfd/max8925-core.c6
-rw-r--r--drivers/mfd/twl4030-irq.c3
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/mtd/mtdcore.c152
-rw-r--r--drivers/mtd/mtdcore.h2
-rw-r--r--drivers/mtd/mtdpart.c16
-rw-r--r--drivers/mtd/nand/spi/core.c1
-rw-r--r--drivers/mtd/nand/spi/winbond.c10
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c9
-rw-r--r--drivers/net/dsa/b53/b53_common.c6
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c27
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c4
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c92
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c24
-rw-r--r--drivers/net/ethernet/faraday/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c11
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c11
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c6
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c5
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h4
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c8
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c19
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c2
-rw-r--r--drivers/net/macsec.c40
-rw-r--r--drivers/net/netconsole.c3
-rw-r--r--drivers/net/netdevsim/netdev.c3
-rw-r--r--drivers/net/phy/mdio_bus.c12
-rw-r--r--drivers/net/phy/phy_caps.c18
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c1
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c33
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c29
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c148
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c127
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c49
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c14
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c58
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h7
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c394
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c829
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h180
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c26
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c24
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c6
-rw-r--r--drivers/nvme/host/core.c87
-rw-r--r--drivers/nvme/host/ioctl.c21
-rw-r--r--drivers/nvme/host/multipath.c2
-rw-r--r--drivers/nvme/host/nvme.h3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c2
-rw-r--r--drivers/pci/pci-acpi.c23
-rw-r--r--drivers/pci/pci.c5
-rw-r--r--drivers/pci/pcie/ptm.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c5
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8084.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5018.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5424.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8074.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq9574.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9607.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9615.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c11
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8953.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8996.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8998.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs615.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs8300.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdf2xxx.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sar2130p.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx75.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4450.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6125.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8150.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8750.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-x1e80100.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c8
-rw-r--r--drivers/platform/x86/amd/amd_isp4.c3
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c14
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c9
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c2
-rw-r--r--drivers/platform/x86/amd/pmf/core.c3
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c108
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c2
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c10
-rw-r--r--drivers/platform/x86/ideapad-laptop.c19
-rw-r--r--drivers/platform/x86/intel/pmc/core.h7
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.c3
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c4
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c2
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c9
-rw-r--r--drivers/platform/x86/samsung-galaxybook.c1
-rw-r--r--drivers/ptp/ptp_clock.c3
-rw-r--r--drivers/ptp/ptp_private.h12
-rw-r--r--drivers/rapidio/rio_cm.c3
-rw-r--r--drivers/regulator/fan53555.c14
-rw-r--r--drivers/regulator/max20086-regulator.c6
-rw-r--r--drivers/rtc/rtc-cmos.c10
-rw-r--r--drivers/rtc/rtc-pcf2127.c7
-rw-r--r--drivers/rtc/rtc-s5m.c197
-rw-r--r--drivers/s390/crypto/pkey_api.c2
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c2
-rw-r--r--drivers/scsi/elx/efct/efct_hw.c5
-rw-r--r--drivers/scsi/fnic/fdls_disc.c187
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c2
-rw-r--r--drivers/scsi/fnic/fnic_fdls.h1
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c6
-rw-r--r--drivers/scsi/mvsas/mv_defs.h4
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c11
-rw-r--r--drivers/scsi/storvsc_drv.c10
-rw-r--r--drivers/spi/spi-cadence-quadspi.c12
-rw-r--r--drivers/spi/spi-loongson-core.c1
-rw-r--r--drivers/spi/spi-offload.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c30
-rw-r--r--drivers/spi/spi-pci1xxxx.c4
-rw-r--r--drivers/spi/spi-stm32-ospi.c24
-rw-r--r--drivers/spi/spi-tegra210-quad.c14
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c44
-rw-r--r--drivers/target/target_core_pr.c4
-rw-r--r--drivers/tty/serial/imx.c17
-rw-r--r--drivers/tty/serial/serial_base_bus.c1
-rw-r--r--drivers/tty/vt/ucs.c2
-rw-r--r--drivers/tty/vt/vt.c1
-rw-r--r--drivers/ufs/core/ufshcd.c10
427 files changed, 4639 insertions, 2141 deletions
diff --git a/drivers/accel/amdxdna/aie2_psp.c b/drivers/accel/amdxdna/aie2_psp.c
index dc3a072ce3b6..f28a060a8810 100644
--- a/drivers/accel/amdxdna/aie2_psp.c
+++ b/drivers/accel/amdxdna/aie2_psp.c
@@ -126,8 +126,8 @@ struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *
psp->ddev = ddev;
memcpy(psp->psp_regs, conf->psp_regs, sizeof(psp->psp_regs));
- psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN) + PSP_FW_ALIGN;
- psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz, GFP_KERNEL);
+ psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN);
+ psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz + PSP_FW_ALIGN, GFP_KERNEL);
if (!psp->fw_buffer) {
drm_err(ddev, "no memory for fw buffer");
return NULL;
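
A minimal userspace sketch of the size arithmetic changed above (not part of the patch); the PSP_FW_ALIGN and fw_size values are made up. It shows that the allocation keeps one alignment unit of slack while the recorded fw_buf_sz no longer includes it.

    /* Illustrative only: ALIGN mirrors the kernel's power-of-two align macro. */
    #include <stdio.h>

    #define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))
    #define PSP_FW_ALIGN  0x10000UL                /* hypothetical 64 KiB alignment */

    int main(void)
    {
        unsigned long fw_size = 0x12345;           /* hypothetical firmware size */
        unsigned long old_sz  = ALIGN(fw_size, PSP_FW_ALIGN) + PSP_FW_ALIGN; /* old fw_buf_sz */
        unsigned long new_sz  = ALIGN(fw_size, PSP_FW_ALIGN);                /* new fw_buf_sz */

        /* The drmm_kmalloc() size stays new_sz + PSP_FW_ALIGN, i.e. unchanged. */
        printf("old fw_buf_sz=0x%lx new fw_buf_sz=0x%lx allocation=0x%lx\n",
               old_sz, new_sz, new_sz + PSP_FW_ALIGN);
        return 0;
    }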
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 6f8bbe1247a5..c9a0bcaba2e4 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -33,7 +33,7 @@
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);
-static unsigned long power_saving_mwait_eax;
+static unsigned int power_saving_mwait_eax;
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index c8f37f4e6626..fef6fb29ece4 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -483,6 +483,13 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
+ if (this_walk_state->num_operands < obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
+ }
+
/* Init for new method, possibly wait on method mutex */
status =
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index fea11a35eea3..9b041415a9d0 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -883,19 +883,16 @@ static int __init einj_init(void)
}
einj_dev = faux_device_create("acpi-einj", NULL, &einj_device_ops);
- if (!einj_dev)
- return -ENODEV;
- einj_initialized = true;
+ if (einj_dev)
+ einj_initialized = true;
return 0;
}
static void __exit einj_exit(void)
{
- if (einj_initialized)
- faux_device_destroy(einj_dev);
-
+ faux_device_destroy(einj_dev);
}
module_init(einj_init);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a9ae2fd62863..6b649031808f 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -476,7 +476,7 @@ bool cppc_allow_fast_switch(void)
struct cpc_desc *cpc_ptr;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 6f4203716b53..75c7db8b156a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -23,8 +23,10 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
@@ -2031,6 +2033,21 @@ void __init acpi_ec_ecdt_probe(void)
goto out;
}
+ if (!strstarts(ecdt_ptr->id, "\\")) {
+ /*
+ * The ECDT table on some MSI notebooks contains invalid data, together
+ * with an empty ID string ("").
+ *
+ * Section 5.2.15 of the ACPI specification requires the ID string to be
+ * a "fully qualified reference to the (...) embedded controller device",
+ * so this string always has to start with a backslash.
+ *
+ * By verifying this we can avoid such faulty ECDT tables in a safe way.
+ */
+ pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);
+ goto out;
+ }
+
ec = acpi_ec_alloc();
if (!ec)
goto out;
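
A minimal sketch of the ID-string check added above: the kernel's strstarts() is a strncmp() against the prefix, so an empty or relative ECDT ID string (anything not beginning with a backslash) is rejected. The sample ID strings here are invented.

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    static bool strstarts(const char *str, const char *prefix)
    {
        return strncmp(str, prefix, strlen(prefix)) == 0;
    }

    int main(void)
    {
        const char *ids[] = { "\\_SB.PCI0.LPCB.EC0", "" };  /* hypothetical ECDT IDs */

        for (int i = 0; i < 2; i++)
            printf("\"%s\": %s\n", ids[i],
                   strstarts(ids[i], "\\") ? "accepted" : "rejected (FW_BUG)");
        return 0;
    }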
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 00910ccd7eda..e2781864fdce 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -175,6 +175,12 @@ bool processor_physically_present(acpi_handle handle);
static inline void acpi_early_processor_control_setup(void) {}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void);
+#else
+static inline void acpi_idle_rescan_dead_smt_siblings(void) {}
+#endif
+
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 3b281bc1e73c..65e779be64ff 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -279,6 +279,9 @@ static int __init acpi_processor_driver_init(void)
* after acpi_cppc_processor_probe() has been called for all online CPUs
*/
acpi_processor_init_invariance_cppc();
+
+ acpi_idle_rescan_dead_smt_siblings();
+
return 0;
err:
driver_unregister(&acpi_processor_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e2febca2ec13..2c2dc559e0f8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -24,6 +24,8 @@
#include <acpi/processor.h>
#include <linux/context_tracking.h>
+#include "internal.h"
+
/*
* Include the apic definitions for x86 to have the APIC timer related defines
* available also for UP (on SMP it gets magically included via linux/smp.h).
@@ -55,6 +57,12 @@ struct cpuidle_driver acpi_idle_driver = {
};
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void)
+{
+ if (cpuidle_get_driver() == &acpi_idle_driver)
+ arch_cpu_rescan_dead_smt_siblings();
+}
+
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 7d59c6c9185f..b1ab192d7a08 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -667,6 +667,13 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ /* MACHENIKE L16P/L16P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MACHENIKE"),
+ DMI_MATCH(DMI_BOARD_NAME, "L16P"),
+ },
+ },
+ {
/*
* TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the
* board-name is changed, so check OEM strings instead. Note
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 163ac909bd06..aa93b0ecbbc6 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1410,8 +1410,15 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
static bool ahci_broken_lpm(struct pci_dev *pdev)
{
+ /*
+ * Platforms with LPM problems.
+ * If driver_data is NULL, there is no existing BIOS version with
+ * functioning LPM.
+ * If driver_data is non-NULL, then driver_data contains the DMI BIOS
+ * build date of the first BIOS version with functioning LPM (i.e. older
+ * BIOS versions have broken LPM).
+ */
static const struct dmi_system_id sysids[] = {
- /* Various Lenovo 50 series have LPM issues with older BIOSen */
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -1438,13 +1445,30 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
},
+ .driver_data = "20180409", /* 2.35 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUSPRO D840MB_M840SA"),
+ },
+ /* 320 is broken, there is no known good version. */
+ },
+ {
/*
- * Note date based on release notes, 2.35 has been
- * reported to be good, but I've been unable to get
- * a hold of the reporter to get the DMI BIOS date.
- * TODO: fix this.
+ * AMD 500 Series Chipset SATA Controller [1022:43eb]
+ * on this motherboard timeouts on ports 5 and 6 when
+ * LPM is enabled, at least with WDC WD20EFAX-68FB5N0
+ * hard drives. LPM with the same drive works fine on
+ * all other ports on the same controller.
*/
- .driver_data = "20180310", /* 2.35 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME,
+ "ROG STRIX B550-F GAMING (WI-FI)"),
+ },
+ /* 3621 is broken, there is no known good version. */
},
{ } /* terminate list */
};
@@ -1455,6 +1479,9 @@ static bool ahci_broken_lpm(struct pci_dev *pdev)
if (!dmi)
return false;
+ if (!dmi->driver_data)
+ return true;
+
dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
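
A minimal sketch of the BIOS-date comparison ahci_broken_lpm() relies on: zero-padded YYYYMMDD strings order lexicographically the same way the dates order numerically, so strcmp() against the driver_data cutoff is enough. The DMI date below is made up; the 20180409 cutoff is the one from the table above.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[9];
        int year = 2017, month = 6, date = 3;      /* hypothetical DMI_BIOS_DATE */
        const char *first_good = "20180409";       /* first BIOS with working LPM */

        snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
        printf("BIOS %s is %s\n", buf,
               strcmp(buf, first_good) < 0 ? "older (LPM treated as broken)"
                                           : "new enough");
        return 0;
    }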
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index b7f0bf795521..f2140fc06ba0 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -514,15 +514,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
/**
- * ata_acpi_cbl_80wire - Check for 80 wire cable
+ * ata_acpi_cbl_pata_type - Return PATA cable type
* @ap: Port to check
- * @gtm: GTM data to use
*
- * Return 1 if the @gtm indicates the BIOS selected an 80wire mode.
+ * Return ATA_CBL_PATA* according to the transfer mode selected by BIOS
*/
-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
+int ata_acpi_cbl_pata_type(struct ata_port *ap)
{
struct ata_device *dev;
+ int ret = ATA_CBL_PATA_UNK;
+ const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
+
+ if (!gtm)
+ return ATA_CBL_PATA40;
ata_for_each_dev(dev, &ap->link, ENABLED) {
unsigned int xfer_mask, udma_mask;
@@ -530,13 +534,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
- if (udma_mask & ~ATA_UDMA_MASK_40C)
- return 1;
+ ret = ATA_CBL_PATA40;
+
+ if (udma_mask & ~ATA_UDMA_MASK_40C) {
+ ret = ATA_CBL_PATA80;
+ break;
+ }
}
- return 0;
+ return ret;
}
-EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire);
+EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type);
static void ata_acpi_gtf_to_tf(struct ata_device *dev,
const struct ata_acpi_gtf *gtf,
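
A minimal sketch of the cable-type decision in ata_acpi_cbl_pata_type(): any BIOS-selected UDMA mode beyond those legal on a 40-wire cable implies an 80-wire cable. The 0x07 mask (UDMA0-2) is assumed to mirror ATA_UDMA_MASK_40C, and the cable-type constants and udma_mask are placeholders.

    #include <stdio.h>

    #define CBL_PATA40     2     /* placeholder values for the example */
    #define CBL_PATA80     3
    #define UDMA_MASK_40C  0x07  /* assumed: modes allowed on a 40-wire cable */

    int main(void)
    {
        unsigned int udma_mask = 0x3f;    /* hypothetical: BIOS selected up to UDMA5 */
        int cbl = CBL_PATA40;

        if (udma_mask & ~UDMA_MASK_40C)   /* anything above UDMA2 needs 80 wires */
            cbl = CBL_PATA80;

        printf("cable type = %s\n", cbl == CBL_PATA80 ? "PATA80" : "PATA40");
        return 0;
    }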
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index b811efd2cc34..73e81e160c91 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -27,7 +27,7 @@
#include <scsi/scsi_host.h>
#include <linux/dmi.h>
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86) && defined(CONFIG_X86_32)
#include <asm/msr.h>
static int use_msr;
module_param_named(msr, use_msr, int, 0644);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index fbf5f07ea357..f7a933eefe05 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1298,7 +1298,7 @@ static int pata_macio_pci_attach(struct pci_dev *pdev,
priv->dev = &pdev->dev;
/* Get MMIO regions */
- if (pci_request_regions(pdev, "pata-macio")) {
+ if (pcim_request_all_regions(pdev, "pata-macio")) {
dev_err(&pdev->dev,
"Cannot obtain PCI resources\n");
return -EBUSY;
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 696b99720dcb..bb80e7800dcb 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) {
two drives */
if (ata66 & (0x10100000 >> (16 * ap->port_no)))
return ATA_CBL_PATA80;
+
/* Check with ACPI so we can spot BIOS reported SATA bridges */
- if (ata_acpi_init_gtm(ap) &&
- ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
- return ATA_CBL_PATA80;
- return ATA_CBL_PATA40;
+ return ata_acpi_cbl_pata_type(ap);
}
static int via_pre_reset(struct ata_link *link, unsigned long deadline)
@@ -368,7 +366,8 @@ static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask)
}
if (dev->class == ATA_DEV_ATAPI &&
- dmi_check_system(no_atapi_dma_dmi_table)) {
+ (dmi_check_system(no_atapi_dma_dmi_table) ||
+ config->id == PCI_DEVICE_ID_VIA_6415)) {
ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
mask &= ATA_MASK_PIO;
}
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index d4aa0f353b6c..eeae160c898d 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -288,7 +288,9 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct sk_buff *new_skb;
int result = 0;
- if (!skb->len) return 0;
+ if (skb->len < sizeof(struct atmtcp_hdr))
+ goto done;
+
dev = vcc->dev_data;
hdr = (struct atmtcp_hdr *) skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC) {
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1206ab764ba9..f2e91b7d79f0 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -852,6 +852,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&card->pcidev->dev, IDT77252_PRV_PADDR(skb)))
+ return -ENOMEM;
error = -EINVAL;
@@ -1857,6 +1859,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
paddr = dma_map_single(&card->pcidev->dev, skb->data,
skb_end_pointer(skb) - skb->data,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&card->pcidev->dev, paddr))
+ goto outpoolrm;
IDT77252_PRV_PADDR(skb) = paddr;
if (push_rx_skb(card, skb, queue)) {
@@ -1871,6 +1875,7 @@ outunmap:
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
+outpoolrm:
handle = IDT77252_PRV_POOL(skb);
card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index 9054d346bd7f..f5fbda0a9a44 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -86,6 +86,7 @@ static struct device_driver faux_driver = {
.name = "faux_driver",
.bus = &faux_bus_type,
.probe_type = PROBE_FORCE_SYNCHRONOUS,
+ .suppress_bind_attrs = true,
};
static void faux_device_release(struct device *dev)
@@ -169,7 +170,7 @@ struct faux_device *faux_device_create_with_groups(const char *name,
* successful is almost impossible to determine by the caller.
*/
if (!dev->driver) {
- dev_err(dev, "probe did not succeed, tearing down the device\n");
+ dev_dbg(dev, "probe did not succeed, tearing down the device\n");
faux_device_destroy(faux_dev);
faux_dev = NULL;
}
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 749ae1246f4c..d35caa3c69e1 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -80,6 +80,7 @@ enum {
DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */
DEVFL_FREED = (1<<8), /* device has been cleaned up */
+ DEVFL_DEAD = (1<<9), /* device has timed out of aoe_deadsecs */
};
enum {
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 50cc90f6ab35..6298f8e271e3 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -754,7 +754,7 @@ rexmit_timer(struct timer_list *timer)
utgts = count_targets(d, NULL);
- if (d->flags & DEVFL_TKILL) {
+ if (d->flags & (DEVFL_TKILL | DEVFL_DEAD)) {
spin_unlock_irqrestore(&d->lock, flags);
return;
}
@@ -786,7 +786,8 @@ rexmit_timer(struct timer_list *timer)
* to clean up.
*/
list_splice(&flist, &d->factive[0]);
- aoedev_downdev(d);
+ d->flags |= DEVFL_DEAD;
+ queue_work(aoe_wq, &d->work);
goto out;
}
@@ -898,6 +899,9 @@ aoecmd_sleepwork(struct work_struct *work)
{
struct aoedev *d = container_of(work, struct aoedev, work);
+ if (d->flags & DEVFL_DEAD)
+ aoedev_downdev(d);
+
if (d->flags & DEVFL_GDALLOC)
aoeblk_gdalloc(d);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index bba05f0c5bbd..3a240755045b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -198,9 +198,13 @@ aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
struct list_head *head, *pos, *nx;
+ struct request *rq, *rqnext;
int i;
+ unsigned long flags;
- d->flags &= ~DEVFL_UP;
+ spin_lock_irqsave(&d->lock, flags);
+ d->flags &= ~(DEVFL_UP | DEVFL_DEAD);
+ spin_unlock_irqrestore(&d->lock, flags);
/* clean out active and to-be-retransmitted buffers */
for (i = 0; i < NFACTIVE; i++) {
@@ -223,6 +227,13 @@ aoedev_downdev(struct aoedev *d)
/* clean out the in-process request (if any) */
aoe_failip(d);
+ /* clean out any queued block requests */
+ list_for_each_entry_safe(rq, rqnext, &d->rq_list, queuelist) {
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
+ blk_mq_end_request(rq, BLK_STS_IOERR);
+ }
+
/* fast fail all pending I/O */
if (d->blkq) {
/* UP is cleared, freeze+quiesce to insure all are errored */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f8d136684109..500840e4a74e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1248,12 +1248,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
- if (size_changed) {
- loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
- lo->lo_backing_file);
- loop_set_size(lo, new_size);
- }
-
/* update the direct I/O flag if lo_offset changed */
loop_update_dio(lo);
@@ -1261,6 +1255,11 @@ out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue, memflags);
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
+ if (!err && size_changed) {
+ loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
+ lo->lo_backing_file);
+ loop_set_size(lo, new_size);
+ }
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index c637ea010d34..c3e3c3b65a6d 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1148,8 +1148,8 @@ exit:
blk_mq_end_request(req, res);
}
-static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
- int res, unsigned issue_flags)
+static struct io_uring_cmd *__ublk_prep_compl_io_cmd(struct ublk_io *io,
+ struct request *req)
{
/* read cmd first because req will overwrite it */
struct io_uring_cmd *cmd = io->cmd;
@@ -1164,6 +1164,13 @@ static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
io->flags &= ~UBLK_IO_FLAG_ACTIVE;
io->req = req;
+ return cmd;
+}
+
+static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
+ int res, unsigned issue_flags)
+{
+ struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
/* tell ublksrv one io request is coming */
io_uring_cmd_done(cmd, res, 0, issue_flags);
@@ -1416,6 +1423,14 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
+static inline bool ublk_belong_to_same_batch(const struct ublk_io *io,
+ const struct ublk_io *io2)
+{
+ return (io_uring_cmd_ctx_handle(io->cmd) ==
+ io_uring_cmd_ctx_handle(io2->cmd)) &&
+ (io->task == io2->task);
+}
+
static void ublk_queue_rqs(struct rq_list *rqlist)
{
struct rq_list requeue_list = { };
@@ -1427,7 +1442,8 @@ static void ublk_queue_rqs(struct rq_list *rqlist)
struct ublk_queue *this_q = req->mq_hctx->driver_data;
struct ublk_io *this_io = &this_q->ios[req->tag];
- if (io && io->task != this_io->task && !rq_list_empty(&submit_list))
+ if (io && !ublk_belong_to_same_batch(io, this_io) &&
+ !rq_list_empty(&submit_list))
ublk_queue_cmd_list(io, &submit_list);
io = this_io;
@@ -2148,10 +2164,9 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
return 0;
}
-static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io)
+static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
+ struct request *req)
{
- struct request *req = io->req;
-
/*
* We have handled UBLK_IO_NEED_GET_DATA command,
* so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
@@ -2178,6 +2193,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
int ret = -EINVAL;
+ struct request *req;
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
@@ -2236,11 +2252,19 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
goto out;
break;
case UBLK_IO_NEED_GET_DATA:
- io->addr = ub_cmd->addr;
- if (!ublk_get_data(ubq, io))
- return -EIOCBQUEUED;
-
- return UBLK_IO_RES_OK;
+ /*
+ * ublk_get_data() may fail and fallback to requeue, so keep
+ * uring_cmd active first and prepare for handling new requeued
+ * request
+ */
+ req = io->req;
+ ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
+ io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+ if (likely(ublk_get_data(ubq, io, req))) {
+ __ublk_prep_compl_io_cmd(io, req);
+ return UBLK_IO_RES_OK;
+ }
+ break;
default:
goto out;
}
@@ -2825,6 +2849,10 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
+ if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth ||
+ info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues)
+ return -EINVAL;
+
if (capable(CAP_SYS_ADMIN))
info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 50fe17f1e1d1..e1c688dd2d45 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -396,8 +396,13 @@ static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
{
int i, ret;
+ struct rxq *rxq = &data->rxq;
+
+ /* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to overcome the
+ * hardware issues leading to race condition at the firmware.
+ */
- for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
+ for (i = 0; i < rxq->count - 3; i++) {
ret = btintel_pcie_submit_rx(data);
if (ret)
return ret;
@@ -1782,8 +1787,8 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
* + size of index * Number of queues(2) * type of index array(4)
* + size of context information
*/
- total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
- + sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;
+ total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
+ total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;
/* Add the sum of size of index array and size of ci struct */
total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
@@ -1808,36 +1813,36 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
data->dma_v_addr = v_addr;
/* Setup descriptor count */
- data->txq.count = BTINTEL_DESCS_COUNT;
- data->rxq.count = BTINTEL_DESCS_COUNT;
+ data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
+ data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;
/* Setup tfds */
data->txq.tfds_p_addr = p_addr;
data->txq.tfds = v_addr;
- p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup urbd0 */
data->txq.urbd0s_p_addr = p_addr;
data->txq.urbd0s = v_addr;
- p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup FRBD*/
data->rxq.frbds_p_addr = p_addr;
data->rxq.frbds = v_addr;
- p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup urbd1 */
data->rxq.urbd1s_p_addr = p_addr;
data->rxq.urbd1s = v_addr;
- p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup data buffers for txq */
err = btintel_pcie_setup_txq_bufs(data, &data->txq);
@@ -2028,6 +2033,28 @@ static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
data->hdev = NULL;
}
+static void btintel_pcie_disable_interrupts(struct btintel_pcie_data *data)
+{
+ spin_lock(&data->irq_lock);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, data->fh_init_mask);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, data->hw_init_mask);
+ spin_unlock(&data->irq_lock);
+}
+
+static void btintel_pcie_enable_interrupts(struct btintel_pcie_data *data)
+{
+ spin_lock(&data->irq_lock);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, ~data->fh_init_mask);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, ~data->hw_init_mask);
+ spin_unlock(&data->irq_lock);
+}
+
+static void btintel_pcie_synchronize_irqs(struct btintel_pcie_data *data)
+{
+ for (int i = 0; i < data->alloc_vecs; i++)
+ synchronize_irq(data->msix_entries[i].vector);
+}
+
static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
struct btintel_pcie_data *data = hci_get_drvdata(hdev);
@@ -2147,6 +2174,8 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
bt_dev_err(hdev, "Firmware download retry count: %d",
fw_dl_retry);
btintel_pcie_dump_debug_registers(hdev);
+ btintel_pcie_disable_interrupts(data);
+ btintel_pcie_synchronize_irqs(data);
err = btintel_pcie_reset_bt(data);
if (err) {
bt_dev_err(hdev, "Failed to do shr reset: %d", err);
@@ -2154,6 +2183,7 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
}
usleep_range(10000, 12000);
btintel_pcie_reset_ia(data);
+ btintel_pcie_enable_interrupts(data);
btintel_pcie_config_msix(data);
err = btintel_pcie_enable_bt(data);
if (err) {
@@ -2286,6 +2316,12 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
data = pci_get_drvdata(pdev);
+ btintel_pcie_disable_interrupts(data);
+
+ btintel_pcie_synchronize_irqs(data);
+
+ flush_work(&data->rx_work);
+
btintel_pcie_reset_bt(data);
for (int i = 0; i < data->alloc_vecs; i++) {
struct msix_entry *msix_entry;
@@ -2298,8 +2334,6 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
btintel_pcie_release_hdev(data);
- flush_work(&data->rx_work);
-
destroy_workqueue(data->workqueue);
btintel_pcie_free(data);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 21b964b15c1c..7dad4523236c 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -154,8 +154,11 @@ enum msix_mbox_int_causes {
/* Default interrupt timeout in msec */
#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS 3000
-/* The number of descriptors in TX/RX queues */
-#define BTINTEL_DESCS_COUNT 16
+/* The number of descriptors in TX queues */
+#define BTINTEL_PCIE_TX_DESCS_COUNT 32
+
+/* The number of descriptors in RX queues */
+#define BTINTEL_PCIE_RX_DESCS_COUNT 64
/* Number of Queue for TX and RX
* It indicates the index of the IA(Index Array)
@@ -177,9 +180,6 @@ enum {
/* Doorbell vector for TFD */
#define BTINTEL_PCIE_TX_DB_VEC 0
-/* Number of pending RX requests for downlink */
-#define BTINTEL_PCIE_RX_MAX_QUEUE 6
-
/* Doorbell vector for FRBD */
#define BTINTEL_PCIE_RX_DB_VEC 513
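
A rough userspace model of the DMA-region carve-out changed above, using placeholder struct sizes: TX descriptors (tfd, urbd0) and RX descriptors (frbd, urbd1) now use separate counts, and each array's offset is the running sum of the arrays before it.

    #include <stdio.h>
    #include <stddef.h>

    #define TX_DESCS_COUNT 32
    #define RX_DESCS_COUNT 64

    struct tfd   { char pad[16]; };   /* hypothetical sizes, not the real layouts */
    struct urbd0 { char pad[16]; };
    struct frbd  { char pad[16]; };
    struct urbd1 { char pad[16]; };

    int main(void)
    {
        size_t total = (sizeof(struct tfd) + sizeof(struct urbd0)) * TX_DESCS_COUNT
                     + (sizeof(struct frbd) + sizeof(struct urbd1)) * RX_DESCS_COUNT;
        size_t off_tfds   = 0;
        size_t off_urbd0s = off_tfds   + sizeof(struct tfd)   * TX_DESCS_COUNT;
        size_t off_frbds  = off_urbd0s + sizeof(struct urbd0) * TX_DESCS_COUNT;
        size_t off_urbd1s = off_frbds  + sizeof(struct frbd)  * RX_DESCS_COUNT;

        printf("total=%zu tfds@%zu urbd0s@%zu frbds@%zu urbd1s@%zu\n",
               total, off_tfds, off_urbd0s, off_frbds, off_urbd1s);
        return 0;
    }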
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 5fe5879881f5..3ec0be496820 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -2392,10 +2392,17 @@ static int qca_serdev_probe(struct serdev_device *serdev)
*/
qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
"bluetooth");
- if (IS_ERR(qcadev->bt_power->pwrseq))
- return PTR_ERR(qcadev->bt_power->pwrseq);
- break;
+ /*
+ * Some modules have BT_EN enabled via a hardware pull-up,
+ * meaning it is not defined in the DTS and is not controlled
+ * through the power sequence. In such cases, fall through
+ * to follow the legacy flow.
+ */
+ if (IS_ERR(qcadev->bt_power->pwrseq))
+ qcadev->bt_power->pwrseq = NULL;
+ else
+ break;
}
fallthrough;
case QCA_WCN3950:
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
index 94ed81644fe1..43c87d0259b6 100644
--- a/drivers/cpufreq/rcpufreq_dt.rs
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -26,9 +26,9 @@ fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
}
/// Finds supply name for the CPU from DT.
-fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> {
+fn find_supply_names(dev: &Device, cpu: cpu::CpuId) -> Option<KVec<CString>> {
// Try "cpu0" for older DTs, fallback to "cpu".
- let name = (cpu == 0)
+ let name = (cpu.as_u32() == 0)
.then(|| find_supply_name_exact(dev, "cpu0"))
.flatten()
.or_else(|| find_supply_name_exact(dev, "cpu"))?;
diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c
index 2cbc664e5d62..623aaa4439c4 100644
--- a/drivers/cxl/core/edac.c
+++ b/drivers/cxl/core/edac.c
@@ -103,10 +103,10 @@ static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
u8 *cap, u16 *cycle, u8 *flags, u8 *min_cycle)
{
struct cxl_mailbox *cxl_mbox;
- u8 min_scrub_cycle = U8_MAX;
struct cxl_region_params *p;
struct cxl_memdev *cxlmd;
struct cxl_region *cxlr;
+ u8 min_scrub_cycle = 0;
int i, ret;
if (!cxl_ps_ctx->cxlr) {
@@ -133,8 +133,12 @@ static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
if (ret)
return ret;
+ /*
+ * The min_scrub_cycle of a region is the max of minimum scrub
+ * cycles supported by memdevs that back the region.
+ */
if (min_cycle)
- min_scrub_cycle = min(*min_cycle, min_scrub_cycle);
+ min_scrub_cycle = max(*min_cycle, min_scrub_cycle);
}
if (min_cycle)
@@ -1099,8 +1103,10 @@ int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt)
old_rec = xa_store(&array_rec->rec_gen_media,
le64_to_cpu(rec->media_hdr.phys_addr), rec,
GFP_KERNEL);
- if (xa_is_err(old_rec))
+ if (xa_is_err(old_rec)) {
+ kfree(rec);
return xa_err(old_rec);
+ }
kfree(old_rec);
@@ -1127,8 +1133,10 @@ int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt)
old_rec = xa_store(&array_rec->rec_dram,
le64_to_cpu(rec->media_hdr.phys_addr), rec,
GFP_KERNEL);
- if (xa_is_err(old_rec))
+ if (xa_is_err(old_rec)) {
+ kfree(rec);
return xa_err(old_rec);
+ }
kfree(old_rec);
@@ -1315,7 +1323,7 @@ cxl_mem_get_rec_dram(struct cxl_memdev *cxlmd,
attrbs.bank = ctx->bank;
break;
case EDAC_REPAIR_RANK_SPARING:
- attrbs.repair_type = CXL_BANK_SPARING;
+ attrbs.repair_type = CXL_RANK_SPARING;
break;
default:
return NULL;
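
A minimal sketch of the scrub-cycle fix above, with made-up per-memdev values: a region's minimum scrub cycle must be honoured by every backing memdev, so it is the maximum of the per-device minimums rather than the minimum.

    #include <stdio.h>

    int main(void)
    {
        unsigned char dev_min_cycle[] = { 12, 24, 6 };  /* hypothetical per-memdev minimums */
        unsigned int region_min = 0;                    /* start low, as the patch does */

        for (int i = 0; i < 3; i++)
            if (dev_min_cycle[i] > region_min)
                region_min = dev_min_cycle[i];

        printf("region min scrub cycle = %u\n", region_min);  /* 24: every device can do it */
        return 0;
    }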
diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c
index 6f2eae1eb126..7c750599ea69 100644
--- a/drivers/cxl/core/features.c
+++ b/drivers/cxl/core/features.c
@@ -544,7 +544,7 @@ static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
u32 flags;
if (rpc_in->op_size < sizeof(uuid_t))
- return ERR_PTR(-EINVAL);
+ return false;
feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
if (IS_ERR(feat))
diff --git a/drivers/cxl/core/ras.c b/drivers/cxl/core/ras.c
index 485a831695c7..2731ba3a0799 100644
--- a/drivers/cxl/core/ras.c
+++ b/drivers/cxl/core/ras.c
@@ -31,40 +31,38 @@ static void cxl_cper_trace_uncorr_port_prot_err(struct pci_dev *pdev,
ras_cap.header_log);
}
-static void cxl_cper_trace_corr_prot_err(struct pci_dev *pdev,
- struct cxl_ras_capability_regs ras_cap)
+static void cxl_cper_trace_corr_prot_err(struct cxl_memdev *cxlmd,
+ struct cxl_ras_capability_regs ras_cap)
{
u32 status = ras_cap.cor_status & ~ras_cap.cor_mask;
- struct cxl_dev_state *cxlds;
- cxlds = pci_get_drvdata(pdev);
- if (!cxlds)
- return;
-
- trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
+ trace_cxl_aer_correctable_error(cxlmd, status);
}
-static void cxl_cper_trace_uncorr_prot_err(struct pci_dev *pdev,
- struct cxl_ras_capability_regs ras_cap)
+static void
+cxl_cper_trace_uncorr_prot_err(struct cxl_memdev *cxlmd,
+ struct cxl_ras_capability_regs ras_cap)
{
u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask;
- struct cxl_dev_state *cxlds;
u32 fe;
- cxlds = pci_get_drvdata(pdev);
- if (!cxlds)
- return;
-
if (hweight32(status) > 1)
fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
ras_cap.cap_control));
else
fe = status;
- trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe,
+ trace_cxl_aer_uncorrectable_error(cxlmd, status, fe,
ras_cap.header_log);
}
+static int match_memdev_by_parent(struct device *dev, const void *uport)
+{
+ if (is_cxl_memdev(dev) && dev->parent == uport)
+ return 1;
+ return 0;
+}
+
static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
{
unsigned int devfn = PCI_DEVFN(data->prot_err.agent_addr.device,
@@ -73,13 +71,12 @@ static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
pci_get_domain_bus_and_slot(data->prot_err.agent_addr.segment,
data->prot_err.agent_addr.bus,
devfn);
+ struct cxl_memdev *cxlmd;
int port_type;
if (!pdev)
return;
- guard(device)(&pdev->dev);
-
port_type = pci_pcie_type(pdev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT ||
port_type == PCI_EXP_TYPE_DOWNSTREAM ||
@@ -92,10 +89,20 @@ static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
return;
}
+ guard(device)(&pdev->dev);
+ if (!pdev->dev.driver)
+ return;
+
+ struct device *mem_dev __free(put_device) = bus_find_device(
+ &cxl_bus_type, NULL, pdev, match_memdev_by_parent);
+ if (!mem_dev)
+ return;
+
+ cxlmd = to_cxl_memdev(mem_dev);
if (data->severity == AER_CORRECTABLE)
- cxl_cper_trace_corr_prot_err(pdev, data->ras_cap);
+ cxl_cper_trace_corr_prot_err(cxlmd, data->ras_cap);
else
- cxl_cper_trace_uncorr_prot_err(pdev, data->ras_cap);
+ cxl_cper_trace_uncorr_prot_err(cxlmd, data->ras_cap);
}
static void cxl_cper_prot_err_work_fn(struct work_struct *work)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 890ecac04dac..2bcf9ceca997 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1118,7 +1118,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
* Catch exporters making buffers inaccessible even when
* attachments preventing that exist.
*/
- WARN_ON_ONCE(ret == EBUSY);
+ WARN_ON_ONCE(ret == -EBUSY);
if (ret)
return ERR_PTR(ret);
}
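
A minimal sketch of the sign bug fixed above: kernel-style functions return negative errno values, so a comparison against the positive EBUSY constant can never be true. The pin() helper is a made-up stand-in for a busy exporter operation.

    #include <stdio.h>
    #include <errno.h>

    static int pin(void)              /* stand-in: returns a kernel-style error */
    {
        return -EBUSY;
    }

    int main(void)
    {
        int ret = pin();

        printf("ret == EBUSY  -> %d (old check, never true)\n", ret == EBUSY);
        printf("ret == -EBUSY -> %d (fixed check)\n", ret == -EBUSY);
        return 0;
    }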
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 7eee3eb47a8e..c9d0c68d2fcb 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -264,8 +264,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
ubuf->sg = NULL;
}
} else {
- dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
- direction);
+ dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
}
return ret;
@@ -280,7 +279,7 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
if (!ubuf->sg)
return -EINVAL;
- dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+ dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
return 0;
}
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 58b1482a0fbb..07f1e9dc1ca7 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1209,7 +1209,9 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_PRIMARY;
- /* Asymmetric dual-rank DIMM support. */
+ if (csrow_sec_enabled(2 * dimm, ctrl, pvt))
+ cs_mode |= CS_EVEN_SECONDARY;
+
if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_SECONDARY;
@@ -1230,12 +1232,13 @@ static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
return cs_mode;
}
-static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
- int csrow_nr, int dimm)
+static int calculate_cs_size(u32 mask, unsigned int cs_mode)
{
- u32 msb, weight, num_zero_bits;
- u32 addr_mask_deinterleaved;
- int size = 0;
+ int msb, weight, num_zero_bits;
+ u32 deinterleaved_mask;
+
+ if (!mask)
+ return 0;
/*
* The number of zero bits in the mask is equal to the number of bits
@@ -1248,19 +1251,30 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
* without swapping with the most significant bit. This can be handled
* by keeping the MSB where it is and ignoring the single zero bit.
*/
- msb = fls(addr_mask_orig) - 1;
- weight = hweight_long(addr_mask_orig);
+ msb = fls(mask) - 1;
+ weight = hweight_long(mask);
num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
- addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
+ deinterleaved_mask = GENMASK(msb - num_zero_bits, 1);
+ edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", deinterleaved_mask);
+
+ return (deinterleaved_mask >> 2) + 1;
+}
+
+static int __addr_mask_to_cs_size(u32 addr_mask, u32 addr_mask_sec,
+ unsigned int cs_mode, int csrow_nr, int dimm)
+{
+ int size;
edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
- edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
- edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
+ edac_dbg(1, " Primary AddrMask: 0x%x\n", addr_mask);
/* Register [31:1] = Address [39:9]. Size is in kBs here. */
- size = (addr_mask_deinterleaved >> 2) + 1;
+ size = calculate_cs_size(addr_mask, cs_mode);
+
+ edac_dbg(1, " Secondary AddrMask: 0x%x\n", addr_mask_sec);
+ size += calculate_cs_size(addr_mask_sec, cs_mode);
/* Return size in MBs. */
return size >> 10;
@@ -1269,8 +1283,8 @@ static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
+ u32 addr_mask = 0, addr_mask_sec = 0;
int cs_mask_nr = csrow_nr;
- u32 addr_mask_orig;
int dimm, size = 0;
/* No Chip Selects are enabled. */
@@ -1308,13 +1322,13 @@ static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
if (!pvt->flags.zn_regs_v2)
cs_mask_nr >>= 1;
- /* Asymmetric dual-rank DIMM support. */
- if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
- addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
- else
- addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
+ if (cs_mode & (CS_EVEN_PRIMARY | CS_ODD_PRIMARY))
+ addr_mask = pvt->csels[umc].csmasks[cs_mask_nr];
- return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
+ if (cs_mode & (CS_EVEN_SECONDARY | CS_ODD_SECONDARY))
+ addr_mask_sec = pvt->csels[umc].csmasks_sec[cs_mask_nr];
+
+ return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, dimm);
}
static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
@@ -3512,9 +3526,10 @@ static void gpu_get_err_info(struct mce *m, struct err_info *err)
static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
unsigned int cs_mode, int csrow_nr)
{
- u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
+ u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr];
+ u32 addr_mask_sec = pvt->csels[umc].csmasks_sec[csrow_nr];
- return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
+ return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, csrow_nr >> 1);
}
static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
@@ -3879,6 +3894,7 @@ static int per_family_init(struct amd64_pvt *pvt)
break;
case 0x70 ... 0x7f:
pvt->ctl_name = "F19h_M70h";
+ pvt->max_mcs = 4;
pvt->flags.zn_regs_v2 = 1;
break;
case 0x90 ... 0x9f:
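
The amd64_edac refactor above factors the deinterleave-and-size math into calculate_cs_size() and sums the results for the primary and secondary address masks. A standalone userspace sketch of that math for a single mask, with fls()/hweight_long()/GENMASK() replaced by portable equivalents (the example mask value is made up):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Userspace sketch of calculate_cs_size(): deinterleave an address mask
     * and turn it into a chip-select size in kB. Helper and variable names
     * here are illustrative, not the kernel's.
     */
    static int cs_size_kb(uint32_t mask, int three_rank_interleave)
    {
        if (!mask)
            return 0;

        int msb = 31 - __builtin_clz(mask);        /* fls(mask) - 1 */
        int weight = __builtin_popcount(mask);     /* hweight_long(mask) */
        int num_zero_bits = msb - weight - !!three_rank_interleave;
        unsigned int hi = (unsigned int)(msb - num_zero_bits);

        /* GENMASK(hi, 1): bits hi..1 set, clamped to avoid shifting by 32. */
        uint32_t deinterleaved = (hi >= 31) ? 0xfffffffeu
                                            : (((1u << (hi + 1)) - 1u) & ~1u);

        /* Register [31:1] maps to Address [39:9], so the result is in kB. */
        return (int)((deinterleaved >> 2) + 1);
    }

    int main(void)
    {
        /* Made-up mask: a fully populated 16-bit mask gives 16384 kB. */
        printf("%d kB\n", cs_size_kb(0x0000fffe, 0));
        return 0;
    }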
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 1930dc00c791..1cb5c67e78ae 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -125,7 +125,7 @@
#define MEM_SLICE_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define MEM_SLICE_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
-static const struct res_config {
+static struct res_config {
bool machine_check;
/* The number of present memory controllers. */
int num_imc;
@@ -479,7 +479,7 @@ static u64 rpl_p_err_addr(u64 ecclog)
return ECC_ERROR_LOG_ADDR45(ecclog);
}
-static const struct res_config ehl_cfg = {
+static struct res_config ehl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xdc00,
@@ -489,7 +489,7 @@ static const struct res_config ehl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static const struct res_config icl_cfg = {
+static struct res_config icl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xd800,
@@ -499,7 +499,7 @@ static const struct res_config icl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static const struct res_config tgl_cfg = {
+static struct res_config tgl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0x5000,
@@ -513,7 +513,7 @@ static const struct res_config tgl_cfg = {
.err_addr_to_imc_addr = tgl_err_addr_to_imc_addr,
};
-static const struct res_config adl_cfg = {
+static struct res_config adl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -524,7 +524,7 @@ static const struct res_config adl_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct res_config adl_n_cfg = {
+static struct res_config adl_n_cfg = {
.machine_check = true,
.num_imc = 1,
.imc_base = 0xd800,
@@ -535,7 +535,7 @@ static const struct res_config adl_n_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct res_config rpl_p_cfg = {
+static struct res_config rpl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -547,7 +547,7 @@ static const struct res_config rpl_p_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct res_config mtl_ps_cfg = {
+static struct res_config mtl_ps_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -558,7 +558,7 @@ static const struct res_config mtl_ps_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct res_config mtl_p_cfg = {
+static struct res_config mtl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -569,7 +569,7 @@ static const struct res_config mtl_p_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static const struct pci_device_id igen6_pci_tbl[] = {
+static struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_EHL_SKU5), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU6), (kernel_ulong_t)&ehl_cfg },
{ PCI_VDEVICE(INTEL, DID_EHL_SKU7), (kernel_ulong_t)&ehl_cfg },
@@ -1350,9 +1350,11 @@ static int igen6_register_mcis(struct pci_dev *pdev, u64 mchbar)
return -ENODEV;
}
- if (lmc < res_cfg->num_imc)
+ if (lmc < res_cfg->num_imc) {
igen6_printk(KERN_WARNING, "Expected %d mcs, but only %d detected.",
res_cfg->num_imc, lmc);
+ res_cfg->num_imc = lmc;
+ }
return 0;
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index 26227669f026..70a01c5b8ad1 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -268,7 +268,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data0 = {
/* LS7A2000 ACPI GPIO */
static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data1 = {
.label = "ls7a2000_gpio",
- .mode = BYTE_CTRL_MODE,
+ .mode = BIT_CTRL_MODE,
.conf_offset = 0x4,
.in_offset = 0x8,
.out_offset = 0x0,
diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index 10ea71273c89..9875e34bde72 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -190,7 +190,9 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
struct mlxbf3_gpio_context *gs;
struct gpio_irq_chip *girq;
struct gpio_chip *gc;
+ char *colon_ptr;
int ret, irq;
+ long num;
gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
if (!gs)
@@ -227,25 +229,39 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
- irq = platform_get_irq(pdev, 0);
- if (irq >= 0) {
- girq = &gs->gc.irq;
- gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
- girq->default_type = IRQ_TYPE_NONE;
- /* This will let us handle the parent IRQ in the driver */
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->parent_handler = NULL;
- girq->handler = handle_bad_irq;
-
- /*
- * Directly request the irq here instead of passing
- * a flow-handler because the irq is shared.
- */
- ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
- IRQF_SHARED, dev_name(dev), gs);
- if (ret)
- return dev_err_probe(dev, ret, "failed to request IRQ");
+ colon_ptr = strchr(dev_name(dev), ':');
+ if (!colon_ptr) {
+ dev_err(dev, "invalid device name format\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtol(++colon_ptr, 16, &num);
+ if (ret) {
+ dev_err(dev, "invalid device instance\n");
+ return ret;
+ }
+
+ if (!num) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0) {
+ girq = &gs->gc.irq;
+ gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
+ girq->default_type = IRQ_TYPE_NONE;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->parent_handler = NULL;
+ girq->handler = handle_bad_irq;
+
+ /*
+ * Directly request the irq here instead of passing
+ * a flow-handler because the irq is shared.
+ */
+ ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
+ IRQF_SHARED, dev_name(dev), gs);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ");
+ }
}
platform_set_drvdata(pdev, gs);
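
The mlxbf3 probe change gates IRQ setup on the ACPI instance number parsed from the device name, i.e. the hex digits after the first ':'. A hedged userspace sketch of that parsing step, with strtol() standing in for kstrtol() and a made-up device name:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Userspace sketch of the instance parsing added to mlxbf3_gpio_probe():
     * take the text after the first ':' in the device name and parse it as a
     * hexadecimal instance number. The device name used below is invented.
     */
    static int parse_instance(const char *dev_name, long *num)
    {
        const char *colon_ptr = strchr(dev_name, ':');
        char *end;

        if (!colon_ptr)
            return -EINVAL;   /* "invalid device name format" path */

        errno = 0;
        *num = strtol(colon_ptr + 1, &end, 16);   /* kstrtol(..., 16, ...) */
        if (errno || end == colon_ptr + 1)
            return -EINVAL;   /* "invalid device instance" path */

        return 0;
    }

    int main(void)
    {
        long num;

        if (!parse_instance("EXAMPLE0:00", &num))
            printf("instance %ld -> %s\n", num,
                   num == 0 ? "request shared IRQ" : "skip IRQ setup");
        return 0;
    }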
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index b852e4997629..e80a96f39788 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -974,7 +974,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
IRQF_ONESHOT | IRQF_SHARED, dev_name(dev),
chip);
if (ret)
- return dev_err_probe(dev, client->irq, "failed to request irq\n");
+ return dev_err_probe(dev, ret, "failed to request irq\n");
return 0;
}
diff --git a/drivers/gpio/gpio-spacemit-k1.c b/drivers/gpio/gpio-spacemit-k1.c
index f027066365ff..3cc75c701ec4 100644
--- a/drivers/gpio/gpio-spacemit-k1.c
+++ b/drivers/gpio/gpio-spacemit-k1.c
@@ -278,6 +278,7 @@ static const struct of_device_id spacemit_gpio_dt_ids[] = {
{ .compatible = "spacemit,k1-gpio" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, spacemit_gpio_dt_ids);
static struct platform_driver spacemit_gpio_driver = {
.probe = spacemit_gpio_probe,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8e626f50b362..f81608330a3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1902,7 +1902,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
- if (preempted && (&job->hw_fence) == fence)
+ if (preempted && (&job->hw_fence.base) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e1bab6a96cb6..78f8755996f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -6019,16 +6019,12 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
return ret;
}
-static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
- struct amdgpu_job *job,
- struct amdgpu_reset_context *reset_context,
- struct list_head *device_list,
- struct amdgpu_hive_info *hive,
- bool need_emergency_restart)
+static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive)
{
- struct list_head *device_list_handle = NULL;
struct amdgpu_device *tmp_adev = NULL;
- int i, r = 0;
+ int r;
/*
* Build list of devices to reset.
@@ -6045,26 +6041,54 @@ static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
}
if (!list_is_first(&adev->reset_list, device_list))
list_rotate_to_front(&adev->reset_list, device_list);
- device_list_handle = device_list;
} else {
list_add_tail(&adev->reset_list, device_list);
- device_list_handle = device_list;
}
if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
- r = amdgpu_device_health_check(device_list_handle);
+ r = amdgpu_device_health_check(device_list);
if (r)
return r;
}
- /* We need to lock reset domain only once both for XGMI and single device */
- tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
- reset_list);
+ return 0;
+}
+
+static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+}
- /* block all schedulers and reset given job's ring */
- list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
+
+static int amdgpu_device_halt_activities(
+ struct amdgpu_device *adev, struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list, struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i, r = 0;
+
+ /* block all schedulers and reset given job's ring */
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
amdgpu_device_set_mp1_state(tmp_adev);
/*
@@ -6252,11 +6276,6 @@ static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
amdgpu_ras_set_error_query_ready(tmp_adev, true);
}
-
- tmp_adev = list_first_entry(device_list, struct amdgpu_device,
- reset_list);
- amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
-
}
@@ -6324,10 +6343,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->hive = hive;
INIT_LIST_HEAD(&device_list);
+ if (amdgpu_device_recovery_prepare(adev, &device_list, hive))
+ goto end_reset;
+
+ /* We need to lock reset domain only once both for XGMI and single device */
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+
r = amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
hive, need_emergency_restart);
if (r)
- goto end_reset;
+ goto reset_unlock;
if (need_emergency_restart)
goto skip_sched_resume;
@@ -6337,7 +6362,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*
* job->base holds a reference to parent fence
*/
- if (job && dma_fence_is_signaled(&job->hw_fence)) {
+ if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
job_signaled = true;
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -6345,13 +6370,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
if (r)
- goto end_reset;
+ goto reset_unlock;
skip_hw_reset:
r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
if (r)
- goto end_reset;
+ goto reset_unlock;
skip_sched_resume:
amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
+reset_unlock:
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -6763,6 +6790,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
memset(&reset_context, 0, sizeof(reset_context));
INIT_LIST_HEAD(&device_list);
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
r = amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
hive, false);
if (hive) {
@@ -6880,8 +6909,8 @@ out:
if (hive) {
list_for_each_entry(tmp_adev, &device_list, reset_list)
amdgpu_device_unset_mp1_state(tmp_adev);
- amdgpu_device_unlock_reset_domain(adev->reset_domain);
}
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
}
if (hive) {
@@ -6927,6 +6956,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
amdgpu_device_sched_resume(&device_list, NULL, NULL);
amdgpu_device_gpu_resume(adev, &device_list, false);
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
adev->pcie_reset_ctx.occurs_dpc = false;
if (hive) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index a0e9bf9b2710..81b3443c8d7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -321,10 +321,12 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
const struct firmware *fw;
int r;
- r = request_firmware(&fw, fw_name, adev->dev);
+ r = firmware_request_nowarn(&fw, fw_name, adev->dev);
if (r) {
- dev_err(adev->dev, "can't load firmware \"%s\"\n",
- fw_name);
+ if (amdgpu_discovery == 2)
+ dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
+ else
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
return r;
}
@@ -459,16 +461,12 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
/* Read from file if it is the preferred option */
fw_name = amdgpu_discovery_get_fw_name(adev);
if (fw_name != NULL) {
- dev_info(adev->dev, "use ip discovery information from file");
+ drm_dbg(&adev->ddev, "use ip discovery information from file");
r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
-
- if (r) {
- dev_err(adev->dev, "failed to read ip discovery binary from file\n");
- r = -EINVAL;
+ if (r)
goto out;
- }
-
} else {
+ drm_dbg(&adev->ddev, "use ip discovery information from memory");
r = amdgpu_discovery_read_binary_from_mem(
adev, adev->mman.discovery_bin);
if (r)
@@ -1338,10 +1336,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
int r;
r = amdgpu_discovery_init(adev);
- if (r) {
- DRM_ERROR("amdgpu_discovery_init failed\n");
+ if (r)
return r;
- }
wafl_ver = 0;
adev->gfx.xcc_mask = 0;
@@ -2579,8 +2575,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
default:
r = amdgpu_discovery_reg_base_init(adev);
- if (r)
- return -EINVAL;
+ if (r) {
+ drm_err(&adev->ddev, "discovery failed: %d\n", r);
+ return r;
+ }
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8cecf25996ed..5fec808d7f54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -41,22 +41,6 @@
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
-/*
- * Fences mark an event in the GPUs pipeline and are used
- * for GPU/CPU synchronization. When the fence is written,
- * it is expected that all buffers associated with that fence
- * are no longer in use by the associated ring on the GPU and
- * that the relevant GPU caches have been flushed.
- */
-
-struct amdgpu_fence {
- struct dma_fence base;
-
- /* RB, DMA, etc. */
- struct amdgpu_ring *ring;
- ktime_t start_timestamp;
-};
-
static struct kmem_cache *amdgpu_fence_slab;
int amdgpu_fence_slab_init(void)
@@ -151,12 +135,12 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
if (am_fence == NULL)
return -ENOMEM;
- fence = &am_fence->base;
- am_fence->ring = ring;
} else {
/* take use of job-embedded fence */
- fence = &job->hw_fence;
+ am_fence = &job->hw_fence;
}
+ fence = &am_fence->base;
+ am_fence->ring = ring;
seq = ++ring->fence_drv.sync_seq;
if (job && job->job_run_counter) {
@@ -718,7 +702,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
* it right here or we won't be able to track them in fence_drv
* and they will remain unsignaled during sa_bo free.
*/
- job = container_of(old, struct amdgpu_job, hw_fence);
+ job = container_of(old, struct amdgpu_job, hw_fence.base);
if (!job->base.s_fence && !dma_fence_is_signaled(old))
dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
@@ -780,7 +764,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
@@ -810,7 +794,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
*/
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
@@ -845,7 +829,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu)
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free job if fence has a parent job */
- kfree(container_of(f, struct amdgpu_job, hw_fence));
+ kfree(container_of(f, struct amdgpu_job, hw_fence.base));
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index acb21fc8b3ce..ddb9d3269357 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -272,8 +272,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
/* Check if any fences where initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
- else if (job->hw_fence.ops)
- f = &job->hw_fence;
+ else if (job->hw_fence.base.ops)
+ f = &job->hw_fence.base;
else
f = NULL;
@@ -290,10 +290,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
/* only put the hw fence if has embedded fence */
- if (!job->hw_fence.ops)
+ if (!job->hw_fence.base.ops)
kfree(job);
else
- dma_fence_put(&job->hw_fence);
+ dma_fence_put(&job->hw_fence.base);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -322,10 +322,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
- if (!job->hw_fence.ops)
+ if (!job->hw_fence.base.ops)
kfree(job);
else
- dma_fence_put(&job->hw_fence);
+ dma_fence_put(&job->hw_fence.base);
}
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index f2c049129661..931fed8892cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -48,7 +48,7 @@ struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
struct amdgpu_sync explicit_sync;
- struct dma_fence hw_fence;
+ struct amdgpu_fence hw_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e6f0b035e20b..c14f63cefe67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -3522,8 +3522,12 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
uint8_t *ucode_array_start_addr;
int err = 0;
- err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_sos.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos.bin", chip_name);
if (err)
goto out;
@@ -3799,8 +3803,12 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
struct amdgpu_device *adev = psp->adev;
int err;
- err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_ta.bin", chip_name);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta.bin", chip_name);
if (err)
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index b95b47110769..e1f25218943a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -127,6 +127,22 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
+/*
+ * Fences mark an event in the GPUs pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.
+ */
+
+struct amdgpu_fence {
+ struct dma_fence base;
+
+ /* RB, DMA, etc. */
+ struct amdgpu_ring *ring;
+ ktime_t start_timestamp;
+};
+
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 6716ac281c49..9b54a1ece447 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -540,8 +540,10 @@ static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
case IP_VERSION(4, 4, 2):
case IP_VERSION(4, 4, 4):
case IP_VERSION(4, 4, 5):
- /* For SDMA 4.x, use the existing DPM interface for backward compatibility */
- r = amdgpu_dpm_reset_sdma(adev, 1 << instance_id);
+ /* For SDMA 4.x, use the existing DPM interface for backward compatibility;
+ * convert the logical instance ID to the physical instance ID before reset.
+ */
+ r = amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id));
break;
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 1):
@@ -568,7 +570,7 @@ static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
/**
* amdgpu_sdma_reset_engine - Reset a specific SDMA engine
* @adev: Pointer to the AMDGPU device
- * @instance_id: ID of the SDMA engine instance to reset
+ * @instance_id: Logical ID of the SDMA engine instance to reset
*
* Returns: 0 on success, or a negative error code on failure.
*/
@@ -601,7 +603,7 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
/* Perform the SDMA reset for the specified instance */
ret = amdgpu_sdma_soft_reset(adev, instance_id);
if (ret) {
- dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id);
+ dev_err(adev->dev, "Failed to reset SDMA logical instance %u\n", instance_id);
goto exit;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 2505c46a9c3d..eaddc441c51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -30,6 +30,10 @@
#define AMDGPU_UCODE_NAME_MAX (128)
+static const struct kicker_device kicker_device_list[] = {
+ {0x744B, 0x00},
+};
+
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -1387,6 +1391,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
return NULL;
}
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
+ if (adev->pdev->device == kicker_device_list[i].device &&
+ adev->pdev->revision == kicker_device_list[i].revision)
+ return true;
+ }
+
+ return false;
+}
+
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 9e89c3487be5..6349aad6da35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -605,6 +605,11 @@ struct amdgpu_firmware {
uint32_t pldm_version;
};
+struct kicker_device {
+ unsigned short device;
+ u8 revision;
+};
+
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr);
@@ -632,5 +637,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
#endif
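
amdgpu_is_kicker_fw() added above is a plain table lookup over (PCI device ID, revision) pairs; when it matches, the PSP/IMU/RLC firmware requests switch to the *_kicker.bin names. A self-contained sketch of that selection, reusing the single table entry from the patch and the psp_13_0_0 prefix from the MODULE_FIRMWARE lines:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct kicker_device {
        unsigned short device;
        unsigned char revision;
    };

    /* Mirrors the single entry added to kicker_device_list[] in the patch. */
    static const struct kicker_device kicker_device_list[] = {
        { 0x744B, 0x00 },
    };

    static bool is_kicker_fw(unsigned short device, unsigned char revision)
    {
        for (size_t i = 0; i < sizeof(kicker_device_list) / sizeof(kicker_device_list[0]); i++) {
            if (device == kicker_device_list[i].device &&
                revision == kicker_device_list[i].revision)
                return true;
        }
        return false;
    }

    int main(void)
    {
        /* Pick the SOS firmware name the way psp_init_sos_microcode() does. */
        printf("amdgpu/psp_13_0_0_%s\n",
               is_kicker_fw(0x744B, 0x00) ? "sos_kicker.bin" : "sos.bin");
        return 0;
    }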
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index afd6d59164bf..ec9b84f92d46 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -85,6 +85,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
@@ -759,6 +760,10 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
"amdgpu/gc_11_0_0_rlc_1.bin");
+ else if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_rlc_kicker.bin", ucode_prefix);
else
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
AMDGPU_UCODE_REQUIRED,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d377a7c57d5e..ad9be3656653 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2235,6 +2235,25 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 3, 0):
+ adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
+ if (adev->gfx.me_fw_version >= 167 &&
+ adev->gfx.pfp_fw_version >= 196 &&
+ adev->gfx.mec_fw_version >= 474) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(9, 4, 2):
adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index cfa91d709d49..cc626036ed9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -32,6 +32,7 @@
#include "gc/gc_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
@@ -51,8 +52,12 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s_imu.bin", ucode_prefix);
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_imu.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index c9eba537de09..28eb846280dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1630,10 +1630,12 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
- r = mes_v11_0_set_hw_resources_1(&adev->mes);
- if (r) {
- DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
- goto failure;
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x50) {
+ r = mes_v11_0_set_hw_resources_1(&adev->mes);
+ if (r) {
+ DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
+ goto failure;
+ }
}
r = mes_v11_0_query_sched_status(&adev->mes);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index b4f17332d466..6b222630f3fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -1742,7 +1742,8 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
if (r)
goto failure;
- mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x4b)
+ mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);
mes_v12_0_init_aggregated_doorbell(&adev->mes);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index df612fd9cc50..ead616c11705 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 9c169112a5e7..cef68df4c663 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -490,7 +490,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
{
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, sdma_cntl;
int i;
for_each_inst(i, inst_mask) {
@@ -502,6 +502,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+ sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0);
+ WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl);
if (sdma[i]->use_doorbell) {
doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
@@ -995,6 +998,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, regSDMA_CNTL);
temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
/* enable context empty interrupt during initialization */
@@ -1670,7 +1674,7 @@ static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
- u32 id = GET_INST(SDMA0, ring->me);
+ u32 id = ring->me;
int r;
if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
@@ -1686,7 +1690,7 @@ static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 instance_id = GET_INST(SDMA0, ring->me);
+ u32 instance_id = ring->me;
u32 inst_mask;
uint64_t rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 9505ae96fbec..1813c3ed0aa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1399,6 +1399,7 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index a6e612b4a892..23f97da62808 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1318,6 +1318,7 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
+ mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs;
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 5a70ae17be04..a9bdf8d61d6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1374,9 +1374,22 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
- /* add firmware version checks here */
- if (0 && !adev->sdma.disable_uq)
- adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(6, 0, 0):
+ if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 2):
+ if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ case IP_VERSION(6, 0, 3):
+ if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ default:
+ break;
+ }
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index ad47d0bdf777..86903eccbd4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1349,9 +1349,15 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
else
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
- /* add firmware version checks here */
- if (0 && !adev->sdma.disable_uq)
- adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ if ((adev->sdma.instance[0].fw_version >= 7836028) && !adev->sdma.disable_uq)
+ adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
+ break;
+ default:
+ break;
+ }
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index 338cf43c45fe..cdefd7fcb0da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -669,6 +669,9 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ /* Resetting the ring; fw should not check the RB ring */
+ fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
/* Pause dpg */
vcn_v5_0_1_pause_dpg_mode(vinst, &state);
@@ -681,7 +684,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
- fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
+
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
@@ -692,6 +695,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
+ /* Reset done; fw can check the RB ring */
fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 8fa6489b6f5d..505036968a77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -240,7 +240,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
- packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
+ packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_map_queues__legacy_engine_sel;
packet->bitfields2.queue_type =
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index baa2374acdeb..4ec73f33535e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -510,6 +510,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.capability |=
HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
+ if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0) &&
+ (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
+ dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
+
sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute",
dev->node_props.max_engine_clk_fcompute);
@@ -2008,8 +2012,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev)
if (!amdgpu_sriov_vf(dev->gpu->adev))
dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED;
- if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
- dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED;
} else {
dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 |
HSA_DBG_WATCH_ADDR_MASK_HI_BIT;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d3100f641ac6..0b8ac9edc070 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4718,9 +4718,23 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
+/* Rescale from [min..max] to [0..MAX_BACKLIGHT_LEVEL] */
+static inline u32 scale_input_to_fw(int min, int max, u64 input)
+{
+ return DIV_ROUND_CLOSEST_ULL(input * MAX_BACKLIGHT_LEVEL, max - min);
+}
+
+/* Rescale from [0..MAX_BACKLIGHT_LEVEL] to [min..max] */
+static inline u32 scale_fw_to_input(int min, int max, u64 input)
+{
+ return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), MAX_BACKLIGHT_LEVEL);
+}
+
static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
- uint32_t *brightness)
+ unsigned int min, unsigned int max,
+ uint32_t *user_brightness)
{
+ u32 brightness = scale_input_to_fw(min, max, *user_brightness);
u8 prev_signal = 0, prev_lum = 0;
int i = 0;
@@ -4731,7 +4745,7 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
return;
/* choose start to run less interpolation steps */
- if (caps->luminance_data[caps->data_points/2].input_signal > *brightness)
+ if (caps->luminance_data[caps->data_points/2].input_signal > brightness)
i = caps->data_points/2;
do {
u8 signal = caps->luminance_data[i].input_signal;
@@ -4742,17 +4756,18 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
* brightness < signal: interpolate between previous and current luminance numerator
* brightness > signal: find next data point
*/
- if (*brightness > signal) {
+ if (brightness > signal) {
prev_signal = signal;
prev_lum = lum;
i++;
continue;
}
- if (*brightness < signal)
+ if (brightness < signal)
lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
- (*brightness - prev_signal),
+ (brightness - prev_signal),
signal - prev_signal);
- *brightness = DIV_ROUND_CLOSEST(lum * *brightness, 101);
+ *user_brightness = scale_fw_to_input(min, max,
+ DIV_ROUND_CLOSEST(lum * brightness, 101));
return;
} while (i < caps->data_points);
}
@@ -4765,11 +4780,10 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
if (!get_brightness_range(caps, &min, &max))
return brightness;
- convert_custom_brightness(caps, &brightness);
+ convert_custom_brightness(caps, min, max, &brightness);
- // Rescale 0..255 to min..max
- return min + DIV_ROUND_CLOSEST((max - min) * brightness,
- AMDGPU_MAX_BL_LEVEL);
+ // Rescale 0..max to min..max
+ return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max);
}
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4782,8 +4796,8 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
if (brightness < min)
return 0;
- // Rescale min..max to 0..255
- return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ // Rescale min..max to 0..max
+ return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min),
max - min);
}
@@ -4908,7 +4922,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct drm_device *drm = aconnector->base.dev;
struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
struct backlight_properties props = { 0 };
- struct amdgpu_dm_backlight_caps caps = { 0 };
+ struct amdgpu_dm_backlight_caps *caps;
char bl_name[16];
int min, max;
@@ -4922,22 +4936,21 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
return;
}
- amdgpu_acpi_get_backlight_caps(&caps);
- if (caps.caps_valid && get_brightness_range(&caps, &min, &max)) {
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+ if (get_brightness_range(caps, &min, &max)) {
if (power_supply_is_system_supplied() > 0)
- props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.ac_level, 100);
+ props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->ac_level, 100);
else
- props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.dc_level, 100);
+ props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->dc_level, 100);
/* min is zero, so max needs to be adjusted */
props.max_brightness = max - min;
drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max,
- caps.ac_level, caps.dc_level);
+ caps->ac_level, caps->dc_level);
} else
- props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
- if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
+ if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
drm_info(drm, "Using custom brightness curve\n");
- props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
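
The backlight rework above first rescales the user value into the firmware's 0..MAX_BACKLIGHT_LEVEL range, walks the custom luminance curve in that space, and then rescales the result back into the panel's min..max range. A small userspace sketch of the two scaling helpers, assuming a firmware ceiling of 255 and using made-up min/max values:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed firmware ceiling for this sketch (MAX_BACKLIGHT_LEVEL). */
    #define FW_MAX 255u

    /* Round-to-nearest division, standing in for DIV_ROUND_CLOSEST_ULL(). */
    static uint64_t div_round_closest(uint64_t n, uint64_t d)
    {
        return (n + d / 2) / d;
    }

    /* Mirrors scale_input_to_fw(): scale the user value against (max - min)
     * into the firmware range [0..FW_MAX]. */
    static uint32_t scale_input_to_fw(int min, int max, uint64_t input)
    {
        return (uint32_t)div_round_closest(input * FW_MAX, (uint64_t)(max - min));
    }

    /* Mirrors scale_fw_to_input(): scale back from [0..FW_MAX] into [min..max]. */
    static uint32_t scale_fw_to_input(int min, int max, uint64_t input)
    {
        return (uint32_t)min + (uint32_t)div_round_closest(input * (uint64_t)(max - min), FW_MAX);
    }

    int main(void)
    {
        int min = 12, max = 512;   /* made-up panel brightness range */
        uint32_t fw = scale_input_to_fw(min, max, 250);

        printf("user 250 -> fw %u -> hw %u\n",
               (unsigned int)fw, (unsigned int)scale_fw_to_input(min, max, fw));
        return 0;
    }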
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index d4395b92fb85..9e3e51a2dc49 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1029,6 +1029,10 @@ enum dc_edid_status dm_helpers_read_local_edid(
return EDID_NO_RESPONSE;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
+ if (!edid ||
+ edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH)
+ return EDID_BAD_INPUT;
+
sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 56d011a1323c..b34b5b52236d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -241,6 +241,7 @@ static bool create_links(
DC_LOG_DC("BIOS object table - end");
/* Create a link for each usb4 dpia port */
+ dc->lowest_dpia_link_index = MAX_LINKS;
for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
struct link_init_data link_init_params = {0};
struct dc_link *link;
@@ -253,6 +254,9 @@ static bool create_links(
link = dc->link_srv->create_link(&link_init_params);
if (link) {
+ if (dc->lowest_dpia_link_index > dc->link_count)
+ dc->lowest_dpia_link_index = dc->link_count;
+
dc->links[dc->link_count] = link;
link->dc = dc;
++dc->link_count;
@@ -6376,6 +6380,35 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
else
return 0;
}
+/**
+ ***********************************************************************************************
+ * dc_get_host_router_index: Get the host router index for a DPIA link
+ *
+ * This function finds the host router index of the target link, provided the target link is a DPIA link.
+ *
+ * @param [in] link: target link
+ * @param [out] host_router_index: host router index of the target link
+ *
+ * @return: true if the host router index is found and valid.
+ *
+ ***********************************************************************************************
+ */
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index)
+{
+ struct dc *dc = link->ctx->dc;
+
+ if (link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ return false;
+
+ if (link->link_index < dc->lowest_dpia_link_index)
+ return false;
+
+ *host_router_index = (link->link_index - dc->lowest_dpia_link_index) / dc->caps.num_of_dpias_per_host_router;
+ if (*host_router_index < dc->caps.num_of_host_routers)
+ return true;
+ else
+ return false;
+}
bool dc_is_cursor_limit_pending(struct dc *dc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1d917be36fc4..f41073c0147e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -66,7 +66,8 @@ struct dmub_notification;
#define MAX_STREAMS 6
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
-#define MAX_HOST_ROUTERS_NUM 2
+#define MAX_HOST_ROUTERS_NUM 3
+#define MAX_DPIA_PER_HOST_ROUTER 2
/* Display Core Interfaces */
struct dc_versions {
@@ -305,6 +306,8 @@ struct dc_caps {
/* Conservative limit for DCC cases which require ODM4:1 to support*/
uint32_t dcc_plane_width_limit;
struct dc_scl_caps scl_caps;
+ uint8_t num_of_host_routers;
+ uint8_t num_of_dpias_per_host_router;
};
struct dc_bug_wa {
@@ -1603,6 +1606,7 @@ struct dc {
uint8_t link_count;
struct dc_link *links[MAX_LINKS];
+ uint8_t lowest_dpia_link_index;
struct link_service *link_srv;
struct dc_state *current_state;
@@ -2595,6 +2599,8 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
+bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index);
+
/* DSC Interfaces */
#include "dc_dsc.h"
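
dc_get_host_router_index() introduced above maps a DPIA link to its host router purely by index arithmetic: DPIA links are taken to be contiguous starting at lowest_dpia_link_index, with num_of_dpias_per_host_router links per router. A tiny sketch of that mapping with illustrative counts:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Sketch of the index math in dc_get_host_router_index(); all counts
     * below are illustrative, not read from real hardware.
     */
    static bool host_router_index(unsigned int link_index,
                                  unsigned int lowest_dpia_link_index,
                                  unsigned int dpias_per_router,
                                  unsigned int num_routers,
                                  unsigned int *out)
    {
        if (link_index < lowest_dpia_link_index)
            return false;

        *out = (link_index - lowest_dpia_link_index) / dpias_per_router;
        return *out < num_routers;
    }

    int main(void)
    {
        unsigned int idx;

        /* e.g. DPIA links start at link 3, two DPIAs per router, three routers */
        if (host_router_index(6, 3, 2, 3, &idx))
            printf("link 6 -> host router %u\n", idx);
        return 0;
    }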
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 0bad8304ccf6..d346f8ae1634 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1172,8 +1172,8 @@ struct dc_lttpr_caps {
union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
union dp_alpm_lttpr_cap alpm;
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
- uint8_t lttpr_ieee_oui[3];
- uint8_t lttpr_device_id[6];
+ uint8_t lttpr_ieee_oui[3]; // Always read from closest LTTPR to host
+ uint8_t lttpr_device_id[6]; // Always read from closest LTTPR to host
};
struct dc_dongle_dfp_cap_ext {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index d47cacfdb695..2aa6d44bb359 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -788,6 +788,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->pixel_format = dml2_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
plane->pixel_format = dml2_444_64;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index c4dad7164d31..5b62cd19d979 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -4685,7 +4685,10 @@ static void calculate_tdlut_setting(
//the tdlut is fetched during the 2 row times of prefetch.
if (p->setup_for_tdlut) {
*p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1);
- *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+ if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024)
+ *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate;
+ else
+ *p->tdlut_opt_time = 0;
*p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate;
*p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 5de775fd8fce..208630754c8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -953,6 +953,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p
out->SourcePixelFormat[location] = dml_420_10;
break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
out->SourcePixelFormat[location] = dml_444_64;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index e8730cc40edb..38e17b1796e1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1225,7 +1225,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
return;
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
- if (!link->skip_implict_edp_power_control)
+ if (!link->skip_implict_edp_power_control && hws)
hws->funcs.edp_backlight_control(link, false);
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index c814d957305a..a267f574b619 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -1047,6 +1047,15 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (dc->caps.sequential_ono) {
update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false;
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false;
+
+ /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+ if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp &&
+ pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) {
+ for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = false;
+ update_state->pg_pipe_res_update[PG_DPP][j] = false;
+ }
+ }
}
}
@@ -1193,6 +1202,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true;
if (dc->caps.sequential_ono) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->stream_res.dsc && !new_pipe->top_pipe &&
+ update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) {
+ update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true;
+ update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true;
+
+ /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */
+ if (new_pipe->plane_res.hubp &&
+ new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) {
+ for (j = 0; j < dc->res_pool->pipe_count; ++j) {
+ update_state->pg_pipe_res_update[PG_HUBP][j] = true;
+ update_state->pg_pipe_res_update[PG_DPP][j] = true;
+ }
+ }
+ }
+ }
+
for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index a5127c2d47ef..0f965380a9b4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -385,9 +385,15 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx)
bool dp_is_lttpr_present(struct dc_link *link)
{
/* Some sink devices report invalid LTTPR revision, so don't validate against that cap */
- return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ uint32_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ bool is_lttpr_present = (lttpr_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4);
+
+ if (lttpr_count > 0 && !is_lttpr_present)
+ DC_LOG_ERROR("LTTPR count is nonzero but invalid lane count reported. Assuming no LTTPR present.\n");
+
+ return is_lttpr_present;
}
/* in DP compliance test, DPR-120 may have
@@ -1551,6 +1557,8 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
uint8_t lttpr_dpcd_data[10] = {0};
enum dc_status status;
bool is_lttpr_present;
+ uint32_t lttpr_count;
+ uint32_t closest_lttpr_offset;
/* Logic to determine LTTPR support*/
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
@@ -1602,20 +1610,22 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
/* If this chip cap is set, at least one retimer must exist in the chain
* Override count to 1 if we receive a known bad count (0 or an invalid value) */
if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ lttpr_count == 0) {
/* If you see this message consistently, either the host platform has FIXED_VS flag
* incorrectly configured or the sink device is returning an invalid count.
*/
DC_LOG_ERROR("lttpr_caps phy_repeater_cnt is 0x%x, forcing it to 0x80.",
link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ lttpr_count = 1;
DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = dp_is_lttpr_present(link);
DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
@@ -1623,11 +1633,25 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
if (is_lttpr_present) {
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- core_link_read_dpcd(link, DP_LTTPR_IEEE_OUI, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
- CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), "LTTPR IEEE OUI: ");
+ // Identify the closest LTTPR to determine if workarounds are required for known embedded LTTPRs
+ closest_lttpr_offset = dp_get_closest_lttpr_offset(lttpr_count);
- core_link_read_dpcd(link, DP_LTTPR_DEVICE_ID, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
- CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), "LTTPR Device ID: ");
+ core_link_read_dpcd(link, (DP_LTTPR_IEEE_OUI + closest_lttpr_offset),
+ link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui));
+ core_link_read_dpcd(link, (DP_LTTPR_DEVICE_ID + closest_lttpr_offset),
+ link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id));
+
+ if (lttpr_count > 1) {
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui),
+ "Closest LTTPR To Host's IEEE OUI: ");
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id),
+ "Closest LTTPR To Host's LTTPR Device ID: ");
+ } else {
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui),
+ "LTTPR IEEE OUI: ");
+ CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id),
+ "LTTPR Device ID: ");
+ }
}
return status;
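For context on the lttpr_count handling above: DPCD register PHY_REPEATER_CNT encodes the number of LT-tunable PHY repeaters one-hot, 0x80 meaning one repeater down to 0x01 meaning eight, so any other value (including 0) is invalid and parses to a count of 0. That is also why the FIXED_VS override writes 0x80: it is the encoding for exactly one repeater. A minimal standalone sketch of that decoding, for illustration only (the helper name is an assumption, not the DC function):

#include <stdint.h>

/* DPCD PHY_REPEATER_CNT is one-hot: 0x80 => 1 repeater ... 0x01 => 8. */
static uint8_t parse_lttpr_repeater_count(uint8_t phy_repeater_cnt)
{
	switch (phy_repeater_cnt) {
	case 0x80: return 1;
	case 0x40: return 2;
	case 0x20: return 3;
	case 0x10: return 4;
	case 0x08: return 5;
	case 0x04: return 6;
	case 0x02: return 7;
	case 0x01: return 8;
	default:   return 0;	/* invalid encoding, treated as "no repeaters" */
	}
}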
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 7e0af5297dc4..51ca0b2959fc 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -1954,6 +1954,9 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
dc->config.disable_hbr_audio_dp2 = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index d96bc6cb73ad..8383e2e59be5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -1885,6 +1885,9 @@ static bool dcn314_resource_construct(
dc->caps.max_disp_clock_khz_at_vmin = 650000;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 72c6cf047db0..e01aa2f2e13e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -1894,6 +1894,9 @@ static bool dcn35_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
* It's expected for future ASIC to have equal or higher value, in order to
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 989a270f7dea..4ebe4e00a4f8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -1866,6 +1866,9 @@ static bool dcn351_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
* It's expected for future ASIC to have equal or higher value, in order to
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index 48e1f234185f..db36b8f9ce65 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -1867,6 +1867,9 @@ static bool dcn36_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ dc->caps.num_of_host_routers = 2;
+ dc->caps.num_of_dpias_per_host_router = 2;
+
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
* It's expected for future ASIC to have equal or higher value, in order to
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a7167668d189..1c7235935d14 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -58,6 +58,7 @@
MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
@@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- char ucode_prefix[15];
+ char ucode_prefix[30];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
const struct common_firmware_header *header;
@@ -103,8 +104,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
return 0;
amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
- "amdgpu/%s.bin", ucode_prefix);
+
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_kicker.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
if (err)
goto out;
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 34547edf1ee3..87f2e5ee8790 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -159,7 +159,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
}
if (!fourcc_mod_is_vendor(modifier, ARM)) {
- DRM_ERROR("Unknown modifier (not Arm)\n");
+ DRM_DEBUG_KMS("Unknown modifier (not Arm)\n");
return false;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 1de832964e92..031980d8f3ab 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -29,7 +29,6 @@
*/
#include <linux/delay.h>
-#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 60224f476e1d..de9c23537465 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -348,12 +348,18 @@ static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata,
* 200 ms. We'll assume that the panel driver will have the hardcoded
* delay in its prepare and always disable HPD.
*
- * If HPD somehow makes sense on some future panel we'll have to
- * change this to be conditional on someone specifying that HPD should
- * be used.
+ * For DisplayPort bridge type, we need HPD. So we use the bridge type
+ * to conditionally disable HPD.
+ * NOTE: The bridge type is set in ti_sn_bridge_probe() but enable_comms()
+ * can be called before. So for DisplayPort, HPD will be enabled once
+ * bridge type is set. We are using bridge type instead of "no-hpd"
+ * property because it is not used properly in devicetree description
+ * and hence is unreliable.
*/
- regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
- HPD_DISABLE);
+
+ if (pdata->bridge.type != DRM_MODE_CONNECTOR_DisplayPort)
+ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
+ HPD_DISABLE);
pdata->comms_enabled = true;
@@ -1195,9 +1201,14 @@ static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
int val = 0;
- pm_runtime_get_sync(pdata->dev);
+ /*
+ * Runtime reference is grabbed in ti_sn_bridge_hpd_enable()
+ * as the chip won't report HPD just after being powered on.
+ * HPD_DEBOUNCED_STATE reflects correct state only after the
+ * debounce time (~100-400 ms).
+ */
+
regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
- pm_runtime_put_autosuspend(pdata->dev);
return val & HPD_DEBOUNCED_STATE ? connector_status_connected
: connector_status_disconnected;
@@ -1220,6 +1231,26 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *
debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
}
+static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+
+ /*
+ * Device needs to be powered on before reading the HPD state
+ * for reliable hpd detection in ti_sn_bridge_detect() due to
+ * the high debounce time.
+ */
+
+ pm_runtime_get_sync(pdata->dev);
+}
+
+static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+
+ pm_runtime_put_autosuspend(pdata->dev);
+}
+
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
@@ -1234,6 +1265,8 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.debugfs_init = ti_sn65dsi86_debugfs_init,
+ .hpd_enable = ti_sn_bridge_hpd_enable,
+ .hpd_disable = ti_sn_bridge_hpd_disable,
};
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
@@ -1321,8 +1354,26 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
- if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort)
- pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
+ if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) {
+ pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_HPD;
+ /*
+ * If comms were already enabled they would have been enabled
+ * with the wrong value of HPD_DISABLE. Update it now. Comms
+ * could be enabled if anyone is holding a pm_runtime reference
+ * (like if a GPIO is in use). Note that in most cases nobody
+ * is doing AUX channel xfers before the bridge is added so
+ * HPD doesn't _really_ matter then. The only exception is in
+ * the eDP case where the panel wants to read the EDID before
+ * the bridge is added. We always consistently have HPD disabled
+ * for eDP.
+ */
+ mutex_lock(&pdata->comms_mutex);
+ if (pdata->comms_enabled)
+ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
+ HPD_DISABLE, 0);
+ mutex_unlock(&pdata->comms_mutex);
+ }
drm_bridge_add(&pdata->bridge);
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 7d2e499ea5de..262e93e07a28 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -708,11 +708,14 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_audio ||
bridge_connector->bridge_dp_audio) {
struct device *dev;
+ struct drm_bridge *bridge;
if (bridge_connector->bridge_hdmi_audio)
- dev = bridge_connector->bridge_hdmi_audio->hdmi_audio_dev;
+ bridge = bridge_connector->bridge_hdmi_audio;
else
- dev = bridge_connector->bridge_dp_audio->hdmi_audio_dev;
+ bridge = bridge_connector->bridge_dp_audio;
+
+ dev = bridge->hdmi_audio_dev;
ret = drm_connector_hdmi_audio_init(connector, dev,
&drm_bridge_connector_hdmi_audio_funcs,
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index f2a6559a2710..dc622c78db9d 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -725,7 +725,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
* monitor doesn't power down exactly after the throw away read.
*/
if (!aux->is_remote) {
- ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
+ ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS);
if (ret < 0)
return ret;
}
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index edbeab88ff2b..d983ee85cf13 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -343,17 +343,18 @@ EXPORT_SYMBOL(drm_writeback_connector_init_with_encoder);
/**
* drm_writeback_connector_cleanup - Cleanup the writeback connector
* @dev: DRM device
- * @wb_connector: Pointer to the writeback connector to clean up
+ * @data: Pointer to the writeback connector to clean up
*
* This will decrement the reference counter of blobs and destroy properties. It
* will also clean the remaining jobs in this writeback connector. Caution: This helper will not
* clean up the attached encoder and the drm_connector.
*/
static void drm_writeback_connector_cleanup(struct drm_device *dev,
- struct drm_writeback_connector *wb_connector)
+ void *data)
{
unsigned long flags;
struct drm_writeback_job *pos, *n;
+ struct drm_writeback_connector *wb_connector = data;
delete_writeback_properties(dev);
drm_property_blob_put(wb_connector->pixel_formats_blob_ptr);
@@ -405,7 +406,7 @@ int drmm_writeback_connector_init(struct drm_device *dev,
if (ret)
return ret;
- ret = drmm_add_action_or_reset(dev, (void *)drm_writeback_connector_cleanup,
+ ret = drmm_add_action_or_reset(dev, drm_writeback_connector_cleanup,
wb_connector);
if (ret)
return ret;
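The drm_writeback change above exists so that the cleanup callback has the exact type drmm_add_action_or_reset() expects, void (*)(struct drm_device *, void *), instead of casting the function pointer (which defeats type checking and can trip indirect-call hardening such as CFI). A hedged sketch of the managed-action pattern, using a made-up resource type:

#include <linux/slab.h>
#include <drm/drm_device.h>
#include <drm/drm_managed.h>

struct foo_resource {
	void *buffer;
};

/* drmm actions receive (dev, void *); recover the real type inside. */
static void foo_cleanup(struct drm_device *dev, void *data)
{
	struct foo_resource *res = data;

	kfree(res->buffer);
}

static int foo_init(struct drm_device *dev, struct foo_resource *res)
{
	/* ... allocate res->buffer ... */
	return drmm_add_action_or_reset(dev, foo_cleanup, res);
}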
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 76a3a3e517d8..71e2e6b9d713 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -35,6 +35,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct drm_gpu_scheduler *sched = sched_job->sched;
struct etnaviv_gpu *gpu = submit->gpu;
u32 dma_addr, primid = 0;
int change;
@@ -89,7 +90,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
- list_add(&sched_job->list, &sched_job->sched->pending_list);
+ spin_lock(&sched->job_list_lock);
+ list_add(&sched_job->list, &sched->pending_list);
+ spin_unlock(&sched->job_list_lock);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
index 74bb3bedf30f..5111bdc3075b 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -103,8 +103,8 @@ static void get_ana_cp_int_prop(u64 vco_clk,
DIV_ROUND_DOWN_ULL(curve_1_interpolated, CURVE0_MULTIPLIER)));
ana_cp_int_temp =
- DIV_ROUND_CLOSEST_ULL(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
- CURVE2_MULTIPLIER);
+ DIV64_U64_ROUND_CLOSEST(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1),
+ CURVE2_MULTIPLIER);
*ana_cp_int = max(1, min(ana_cp_int_temp, 127));
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 346737f15fa9..21c1e10caf68 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1056,7 +1056,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
intel_de_read(display,
- BXT_MIPI_TRANS_VTOTAL(port));
+ BXT_MIPI_TRANS_VTOTAL(port)) + 1;
hactive = adjusted_mode->crtc_hdisplay;
hfp = intel_de_read(display, MIPI_HFP_COUNT(display, port));
@@ -1260,7 +1260,7 @@ static void set_dsi_timings(struct intel_encoder *encoder,
intel_de_write(display, BXT_MIPI_TRANS_VACTIVE(port),
adjusted_mode->crtc_vdisplay);
intel_de_write(display, BXT_MIPI_TRANS_VTOTAL(port),
- adjusted_mode->crtc_vtotal);
+ adjusted_mode->crtc_vtotal - 1);
}
intel_de_write(display, MIPI_HACTIVE_AREA_COUNT(display, port),
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index e5a188ce3185..5bc696bfbb0f 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -108,11 +108,11 @@ static unsigned int config_bit(const u64 config)
return other_bit(config);
}
-static u32 config_mask(const u64 config)
+static __always_inline u32 config_mask(const u64 config)
{
unsigned int bit = config_bit(config);
- if (__builtin_constant_p(config))
+ if (__builtin_constant_p(bit))
BUILD_BUG_ON(bit >
BITS_PER_TYPE(typeof_member(struct i915_pmu,
enable)) - 1);
@@ -121,7 +121,7 @@ static u32 config_mask(const u64 config)
BITS_PER_TYPE(typeof_member(struct i915_pmu,
enable)) - 1);
- return BIT(config_bit(config));
+ return BIT(bit);
}
static bool is_engine_event(struct perf_event *event)
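The i915_pmu hunk is the usual constant-folding idiom: mark the helper __always_inline and test __builtin_constant_p() on the value being range-checked, so the check becomes a BUILD_BUG_ON() whenever the compiler can prove the bit at compile time and a runtime warning otherwise; the fix also makes the function return BIT(bit) for the exact value that was checked. A minimal sketch of the idiom (names are illustrative, not the i915 helpers):

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/types.h>

#define MY_MAX_BIT	31

static __always_inline u32 mask_for(unsigned int bit)
{
	if (__builtin_constant_p(bit))
		BUILD_BUG_ON(bit > MY_MAX_BIT);	/* rejected at compile time */
	else
		WARN_ON(bit > MY_MAX_BIT);	/* caught at runtime */

	return BIT(bit);
}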
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 47136bbbe8c6..ab08d690d882 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -109,7 +109,7 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
venc_freq /= 2;
dev_dbg(priv->dev,
- "vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
+ "phy:%lluHz vclk=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 3325580d885d..dfe0c28a0f05 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -110,10 +110,7 @@
#define HDMI_PLL_LOCK BIT(31)
#define HDMI_PLL_LOCK_G12A (3 << 30)
-#define PIXEL_FREQ_1000_1001(_freq) \
- DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
-#define PHY_FREQ_1000_1001(_freq) \
- (PIXEL_FREQ_1000_1001(DIV_ROUND_DOWN_ULL(_freq, 10ULL)) * 10)
+#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
/* VID PLL Dividers */
enum {
@@ -772,6 +769,36 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
pll_freq);
}
+static bool meson_vclk_freqs_are_matching_param(unsigned int idx,
+ unsigned long long phy_freq,
+ unsigned long long vclk_freq)
+{
+ DRM_DEBUG_DRIVER("i = %d vclk_freq = %lluHz alt = %lluHz\n",
+ idx, params[idx].vclk_freq,
+ FREQ_1000_1001(params[idx].vclk_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
+ idx, params[idx].phy_freq,
+ FREQ_1000_1001(params[idx].phy_freq));
+
+ /* Match strict frequency */
+ if (phy_freq == params[idx].phy_freq &&
+ vclk_freq == params[idx].vclk_freq)
+ return true;
+
+ /* Match 1000/1001 variant: vclk deviation has to be less than 1kHz
+ * (drm EDID is defined in 1kHz steps, so everything smaller must be
+ * rounding error) and the PHY freq deviation has to be less than
+ * 10kHz (as the TMDS clock is 10 times the pixel clock, so anything
+ * smaller must be rounding error as well).
+ */
+ if (abs(vclk_freq - FREQ_1000_1001(params[idx].vclk_freq)) < 1000 &&
+ abs(phy_freq - FREQ_1000_1001(params[idx].phy_freq)) < 10000)
+ return true;
+
+ /* no match */
+ return false;
+}
+
enum drm_mode_status
meson_vclk_vic_supported_freq(struct meson_drm *priv,
unsigned long long phy_freq,
@@ -790,19 +817,7 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv,
}
for (i = 0 ; params[i].pixel_freq ; ++i) {
- DRM_DEBUG_DRIVER("i = %d pixel_freq = %lluHz alt = %lluHz\n",
- i, params[i].pixel_freq,
- PIXEL_FREQ_1000_1001(params[i].pixel_freq));
- DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
- i, params[i].phy_freq,
- PHY_FREQ_1000_1001(params[i].phy_freq));
- /* Match strict frequency */
- if (phy_freq == params[i].phy_freq &&
- vclk_freq == params[i].vclk_freq)
- return MODE_OK;
- /* Match 1000/1001 variant */
- if (phy_freq == PHY_FREQ_1000_1001(params[i].phy_freq) &&
- vclk_freq == PIXEL_FREQ_1000_1001(params[i].vclk_freq))
+ if (meson_vclk_freqs_are_matching_param(i, phy_freq, vclk_freq))
return MODE_OK;
}
@@ -1075,10 +1090,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
- if ((phy_freq == params[freq].phy_freq ||
- phy_freq == PHY_FREQ_1000_1001(params[freq].phy_freq)) &&
- (vclk_freq == params[freq].vclk_freq ||
- vclk_freq == PIXEL_FREQ_1000_1001(params[freq].vclk_freq))) {
+ if (meson_vclk_freqs_are_matching_param(freq, phy_freq,
+ vclk_freq)) {
if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
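A worked example of the FREQ_1000_1001() tolerance above, taking the common 1080p point as an illustration: the nominal vclk is 148500000 Hz and the PHY (TMDS) clock 1485000000 Hz, so the 1000/1001 variants are DIV_ROUND_CLOSEST_ULL(148500000 * 1000, 1001) = 148351648 Hz and 1483516484 Hz. A 59.94 Hz mode advertised through EDID as 148352 kHz requests 148352000 Hz and 1483520000 Hz, which differ from those variants by 352 Hz and 3516 Hz, inside the 1 kHz / 10 kHz windows, so it is accepted as the alternate clock. A standalone arithmetic check (not driver code):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

/* Same rounding as DIV_ROUND_CLOSEST_ULL(freq * 1000, 1001). */
#define FREQ_1000_1001(f)	(((f) * 1000ULL + 500ULL) / 1001ULL)

int main(void)
{
	uint64_t vclk = 148500000ULL, phy = 1485000000ULL;		/* nominal 1080p60 */
	uint64_t vclk_req = 148352000ULL, phy_req = 1483520000ULL;	/* 59.94 Hz request */

	printf("alt vclk = %" PRIu64 " Hz, alt phy = %" PRIu64 " Hz\n",
	       FREQ_1000_1001(vclk), FREQ_1000_1001(phy));
	printf("vclk delta = %lld Hz, phy delta = %lld Hz\n",
	       llabs((long long)(vclk_req - FREQ_1000_1001(vclk))),
	       llabs((long long)(phy_req - FREQ_1000_1001(phy))));
	return 0;
}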
diff --git a/drivers/gpu/drm/mgag200/mgag200_ddc.c b/drivers/gpu/drm/mgag200/mgag200_ddc.c
index 6d81ea8931e8..c31673eaa554 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ddc.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ddc.c
@@ -26,7 +26,6 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <linux/export.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
index 39641551eeb6..4280f71e472a 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -71,10 +71,6 @@ static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
return 0;
}
-static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu)
-{
-}
-
static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
{
struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
@@ -90,7 +86,6 @@ static const struct msm_mmu_funcs funcs = {
.map = a2xx_gpummu_map,
.unmap = a2xx_gpummu_unmap,
.destroy = a2xx_gpummu_destroy,
- .resume_translation = a2xx_gpummu_resume_translation,
};
struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 650e5bac225f..60aef0796236 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -131,6 +131,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ adreno_check_and_reenable_stall(adreno_gpu);
+
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
ring->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index bf3758f010f4..491fde0083a2 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -130,6 +130,20 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno - 1);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+ /* Reset state used to synchronize BR and BV */
+ OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1);
+ OUT_RING(ring,
+ CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS |
+ CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE |
+ CP_RESET_CONTEXT_STATE_0_CLEAR_BV_BR_COUNTER |
+ CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_SET_THREAD_BR);
}
if (!sysprof) {
@@ -212,6 +226,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ adreno_check_and_reenable_stall(adreno_gpu);
+
a6xx_set_pagetable(a6xx_gpu, ring, submit);
get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
@@ -335,6 +351,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ adreno_check_and_reenable_stall(adreno_gpu);
+
/*
* Toggle concurrent binning for pagetable switch and set the thread to
* BR since only it can execute the pagetable switch packets.
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index f5e1490d07c1..16e7ac444efd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -137,9 +137,8 @@ err_disable_rpm:
return NULL;
}
-static int find_chipid(struct device *dev, uint32_t *chipid)
+static int find_chipid(struct device_node *node, uint32_t *chipid)
{
- struct device_node *node = dev->of_node;
const char *compat;
int ret;
@@ -173,15 +172,36 @@ static int find_chipid(struct device *dev, uint32_t *chipid)
/* and if that fails, fall back to legacy "qcom,chipid" property: */
ret = of_property_read_u32(node, "qcom,chipid", chipid);
if (ret) {
- DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
+ DRM_ERROR("%pOF: could not parse qcom,chipid: %d\n",
+ node, ret);
return ret;
}
- dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+ pr_warn("%pOF: Using legacy qcom,chipid binding!\n", node);
return 0;
}
+bool adreno_has_gpu(struct device_node *node)
+{
+ const struct adreno_info *info;
+ uint32_t chip_id;
+ int ret;
+
+ ret = find_chipid(node, &chip_id);
+ if (ret)
+ return false;
+
+ info = adreno_info(chip_id);
+ if (!info) {
+ pr_warn("%pOF: Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
+ node, ADRENO_CHIPID_ARGS(chip_id));
+ return false;
+ }
+
+ return true;
+}
+
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
@@ -191,19 +211,18 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
struct msm_gpu *gpu;
int ret;
- ret = find_chipid(dev, &config.chip_id);
- if (ret)
+ ret = find_chipid(dev->of_node, &config.chip_id);
+ /* We shouldn't have gotten this far if we can't parse the chip_id */
+ if (WARN_ON(ret))
return ret;
dev->platform_data = &config;
priv->gpu_pdev = to_platform_device(dev);
info = adreno_info(config.chip_id);
- if (!info) {
- dev_warn(drm->dev, "Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
- ADRENO_CHIPID_ARGS(config.chip_id));
+ /* We shouldn't have gotten this far if we don't recognize the GPU: */
+ if (WARN_ON(!info))
return -ENXIO;
- }
config.info = info;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2348ffb35f7e..86bff915c3e7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -259,24 +259,54 @@ u64 adreno_private_address_space_size(struct msm_gpu *gpu)
return BIT(ttbr1_cfg->ias) - ADRENO_VM_START;
}
+void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ unsigned long flags;
+
+ /*
+ * Re-enable stall-on-fault only once the cooldown period has passed
+ * and a new fault would actually result in a crashdump being collected.
+ */
+ spin_lock_irqsave(&priv->fault_stall_lock, flags);
+ if (!priv->stall_enabled &&
+ ktime_after(ktime_get(), priv->stall_reenable_time) &&
+ !READ_ONCE(gpu->crashstate)) {
+ priv->stall_enabled = true;
+
+ gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, true);
+ }
+ spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
+}
+
#define ARM_SMMU_FSR_TF BIT(1)
#define ARM_SMMU_FSR_PF BIT(3)
#define ARM_SMMU_FSR_EF BIT(4)
+#define ARM_SMMU_FSR_SS BIT(30)
int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
u32 scratch[4])
{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
const char *type = "UNKNOWN";
- bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
+ bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
+ !READ_ONCE(gpu->crashstate);
+ unsigned long irq_flags;
/*
- * If we aren't going to be resuming later from fault_worker, then do
- * it now.
+ * In case there is a subsequent storm of pagefaults, disable
+ * stall-on-fault for at least half a second.
*/
- if (!do_devcoredump) {
- gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+ spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
+ if (priv->stall_enabled) {
+ priv->stall_enabled = false;
+
+ gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, false);
}
+ priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
+ spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
/*
* Print a default message if we couldn't get the data from the
@@ -304,16 +334,18 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
scratch[0], scratch[1], scratch[2], scratch[3]);
if (do_devcoredump) {
+ struct msm_gpu_fault_info fault_info = {};
+
/* Turn off the hangcheck timer to keep it from bothering us */
timer_delete(&gpu->hangcheck_timer);
- gpu->fault_info.ttbr0 = info->ttbr0;
- gpu->fault_info.iova = iova;
- gpu->fault_info.flags = flags;
- gpu->fault_info.type = type;
- gpu->fault_info.block = block;
+ fault_info.ttbr0 = info->ttbr0;
+ fault_info.iova = iova;
+ fault_info.flags = flags;
+ fault_info.type = type;
+ fault_info.block = block;
- kthread_queue_work(gpu->worker, &gpu->fault_work);
+ msm_gpu_fault_crashstate_capture(gpu, &fault_info);
}
return 0;
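The adreno change above implements a simple cooldown gate: the first stalled fault turns stall-on-fault off and records a re-enable deadline of now + 500 ms, and the next submit past that deadline (with no crash state pending) turns it back on. A minimal self-contained sketch of that gate, using a hypothetical struct rather than msm_drm_private:

#include <linux/ktime.h>
#include <linux/spinlock.h>

struct stall_gate {
	spinlock_t	lock;
	bool		enabled;
	ktime_t		reenable_time;
};

/* Fault path: back off for at least 500 ms. */
static void stall_gate_backoff(struct stall_gate *g)
{
	unsigned long flags;

	spin_lock_irqsave(&g->lock, flags);
	g->enabled = false;
	g->reenable_time = ktime_add_ms(ktime_get(), 500);
	spin_unlock_irqrestore(&g->lock, flags);
}

/* Submit path: returns true when the caller should flip the SMMU stall bit back on. */
static bool stall_gate_maybe_reenable(struct stall_gate *g)
{
	unsigned long flags;
	bool reenable = false;

	spin_lock_irqsave(&g->lock, flags);
	if (!g->enabled && ktime_after(ktime_get(), g->reenable_time)) {
		g->enabled = true;
		reenable = true;
	}
	spin_unlock_irqrestore(&g->lock, flags);

	return reenable;
}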
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index a8f4bf416e64..bc063594a359 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -636,6 +636,8 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
u32 scratch[4]);
+void adreno_check_and_reenable_stall(struct adreno_gpu *gpu);
+
int adreno_read_speedbin(struct device *dev, u32 *speedbin);
/*
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 8a618841e3ea..1c468ca5d692 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -94,17 +94,21 @@ static void drm_mode_to_intf_timing_params(
timing->vsync_polarity = 0;
}
- /* for DP/EDP, Shift timings to align it to bottom right */
- if (phys_enc->hw_intf->cap->type == INTF_DP) {
+ timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+ timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
+
+ /*
+ * For DP/EDP, shift timings to align them to the bottom right.
+ * wide_bus_en is set for everything except SDM845, where these
+ * porch changes cause DisplayPort failures and HDMI tearing.
+ */
+ if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
timing->h_back_porch += timing->h_front_porch;
timing->h_front_porch = 0;
timing->v_back_porch += timing->v_front_porch;
timing->v_front_porch = 0;
}
- timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
- timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
-
/*
* for DP, divide the horizonal parameters by 2 when
* widebus is enabled
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 386c4669c831..a48e6db4f156 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -128,6 +128,11 @@ static const struct msm_dp_desc msm_dp_desc_sa8775p[] = {
{}
};
+static const struct msm_dp_desc msm_dp_desc_sdm845[] = {
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
+ {}
+};
+
static const struct msm_dp_desc msm_dp_desc_sc7180[] = {
{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
@@ -180,7 +185,7 @@ static const struct of_device_id msm_dp_dt_match[] = {
{ .compatible = "qcom,sc8180x-edp", .data = &msm_dp_desc_sc8180x },
{ .compatible = "qcom,sc8280xp-dp", .data = &msm_dp_desc_sc8280xp },
{ .compatible = "qcom,sc8280xp-edp", .data = &msm_dp_desc_sc8280xp },
- { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sc7180 },
+ { .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sdm845 },
{ .compatible = "qcom,sm8350-dp", .data = &msm_dp_desc_sc7180 },
{ .compatible = "qcom,sm8650-dp", .data = &msm_dp_desc_sm8650 },
{ .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 },
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 9812b4d69197..af2e30f3f842 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -704,6 +704,13 @@ static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
/* TODO: Remove this when we have proper display handover support */
msm_dsi_phy_pll_save_state(phy);
+ /*
+ * Store also proper vco_current_rate, because its value will be used in
+ * dsi_10nm_pll_restore_state().
+ */
+ if (!dsi_pll_10nm_vco_recalc_rate(&pll_10nm->clk_hw, VCO_REF_CLK_RATE))
+ pll_10nm->vco_current_rate = pll_10nm->phy->cfg->min_pll_rate;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 7ab607252d18..6af72162cda4 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -208,6 +208,35 @@ DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops,
shrink_get, shrink_set,
"0x%08llx\n");
+/*
+ * Return the number of microseconds to wait until stall-on-fault is
+ * re-enabled. If 0 then it is already enabled or will be re-enabled on the
+ * next submit (unless there's a leftover devcoredump). This is useful for
+ * kernel tests that intentionally produce a fault and check the devcoredump,
+ * so they can wait until the cooldown period is over.
+ */
+
+static int
+stall_reenable_time_get(void *data, u64 *val)
+{
+ struct msm_drm_private *priv = data;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
+
+ if (priv->stall_enabled)
+ *val = 0;
+ else
+ *val = max(ktime_us_delta(priv->stall_reenable_time, ktime_get()), 0);
+
+ spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(stall_reenable_time_fops,
+ stall_reenable_time_get, NULL,
+ "%lld\n");
static int msm_gem_show(struct seq_file *m, void *arg)
{
@@ -319,6 +348,9 @@ static void msm_debugfs_gpu_init(struct drm_minor *minor)
debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
&priv->disable_err_irq);
+ debugfs_create_file("stall_reenable_time_us", 0400, minor->debugfs_root,
+ priv, &stall_reenable_time_fops);
+
gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);
debugfs_create_bool("idle_clamp",0600, gpu_devfreq,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f316e6776f67..d007687c2446 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -245,6 +245,10 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
+ /* Initialize stall-on-fault */
+ spin_lock_init(&priv->fault_stall_lock);
+ priv->stall_enabled = true;
+
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
might_lock(&priv->lru.lock);
@@ -926,7 +930,7 @@ static const struct drm_driver msm_driver = {
* is no external component that we need to add since LVDS is within MDP4
* itself.
*/
-static int add_components_mdp(struct device *master_dev,
+static int add_mdp_components(struct device *master_dev,
struct component_match **matchptr)
{
struct device_node *np = master_dev->of_node;
@@ -1030,7 +1034,7 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- if (of_device_is_available(np))
+ if (of_device_is_available(np) && adreno_has_gpu(np))
drm_of_component_match_add(dev, matchptr, component_compare_of, np);
of_node_put(np);
@@ -1071,7 +1075,7 @@ int msm_drv_probe(struct device *master_dev,
/* Add mdp components if we have KMS. */
if (kms_init) {
- ret = add_components_mdp(master_dev, &match);
+ ret = add_mdp_components(master_dev, &match);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a65077855201..c8afb1ea6040 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -222,6 +222,29 @@ struct msm_drm_private {
* the sw hangcheck mechanism.
*/
bool disable_err_irq;
+
+ /**
+ * @fault_stall_lock:
+ *
+ * Serialize changes to stall-on-fault state.
+ */
+ spinlock_t fault_stall_lock;
+
+ /**
+ * @stall_reenable_time:
+ *
+ * If stall_enabled is false, when to reenable stall-on-fault.
+ * Protected by @fault_stall_lock.
+ */
+ ktime_t stall_reenable_time;
+
+ /**
+ * @stall_enabled:
+ *
+ * Whether stall-on-fault is currently enabled. Protected by
+ * @fault_stall_lock.
+ */
+ bool stall_enabled;
};
const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3e9aa2cc38ef..d4f71bb54e84 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -85,6 +85,15 @@ void __msm_gem_submit_destroy(struct kref *kref)
container_of(kref, struct msm_gem_submit, ref);
unsigned i;
+ /*
+ * In error paths, we could unref the submit without calling
+ * drm_sched_entity_push_job(), so msm_job_free() will never
+ * get called. Since drm_sched_job_cleanup() will NULL out
+ * s_fence, we can use that to detect this case.
+ */
+ if (submit->base.s_fence)
+ drm_sched_job_cleanup(&submit->base);
+
if (submit->fence_id) {
spin_lock(&submit->queue->idr_lock);
idr_remove(&submit->queue->fence_idr, submit->fence_id);
@@ -649,6 +658,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_ringbuffer *ring;
struct msm_submit_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
+ struct sync_file *sync_file = NULL;
int out_fence_fd = -1;
unsigned i;
int ret;
@@ -858,7 +868,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
- struct sync_file *sync_file = sync_file_create(submit->user_fence);
+ sync_file = sync_file_create(submit->user_fence);
if (!sync_file) {
ret = -ENOMEM;
} else {
@@ -892,8 +902,11 @@ out:
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
- if (ret && (out_fence_fd >= 0))
+ if (ret && (out_fence_fd >= 0)) {
put_unused_fd(out_fence_fd);
+ if (sync_file)
+ fput(sync_file->file);
+ }
if (!IS_ERR_OR_NULL(submit)) {
msm_gem_submit_put(submit);
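The msm_gem_submit fix closes a leak in the FENCE_FD_OUT error path: once sync_file_create() has succeeded but before fd_install() runs, bailing out must drop both the reserved fd and the sync_file's backing file. A hedged sketch of the overall pattern (the helper shape and the copy_to_user() step are illustrative, not the msm ioctl):

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

static int export_fence_fd(struct dma_fence *fence, int __user *user_fd)
{
	struct sync_file *sync_file = NULL;
	int fd, ret;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_to_user(user_fd, &fd, sizeof(fd))) {	/* can still fail after creation */
		ret = -EFAULT;
		goto err;
	}

	fd_install(fd, sync_file->file);	/* success: the fd now owns the file */
	return 0;

err:
	put_unused_fd(fd);
	if (sync_file)
		fput(sync_file->file);		/* created but never installed */
	return ret;
}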
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 197871fdf508..3947f7ba1421 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -257,7 +257,8 @@ out:
}
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
- struct msm_gem_submit *submit, char *comm, char *cmd)
+ struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+ char *comm, char *cmd)
{
struct msm_gpu_state *state;
@@ -276,7 +277,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
/* Fill in the additional crash state information */
state->comm = kstrdup(comm, GFP_KERNEL);
state->cmd = kstrdup(cmd, GFP_KERNEL);
- state->fault_info = gpu->fault_info;
+ if (fault_info)
+ state->fault_info = *fault_info;
if (submit) {
int i;
@@ -308,7 +310,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
- struct msm_gem_submit *submit, char *comm, char *cmd)
+ struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+ char *comm, char *cmd)
{
}
#endif
@@ -405,7 +408,7 @@ static void recover_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);
kfree(cmd);
kfree(comm);
@@ -459,9 +462,8 @@ out_unlock:
msm_gpu_retire(gpu);
}
-static void fault_worker(struct kthread_work *work)
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
@@ -484,16 +486,13 @@ static void fault_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
- msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
pm_runtime_put_sync(&gpu->pdev->dev);
kfree(cmd);
kfree(comm);
resume_smmu:
- memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
- gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
-
mutex_unlock(&gpu->lock);
}
@@ -882,7 +881,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
init_waitqueue_head(&gpu->retire_event);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
- kthread_init_work(&gpu->fault_work, fault_worker);
priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index e25009150579..5bf7cd985b9c 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -253,12 +253,6 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
struct timer_list hangcheck_timer;
- /* Fault info for most recent iova fault: */
- struct msm_gpu_fault_info fault_info;
-
- /* work for handling GPU ioval faults: */
- struct kthread_work fault_work;
-
/* work for handling GPU recovery: */
struct kthread_work recover_work;
@@ -668,6 +662,7 @@ msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *ta
void msm_gpu_cleanup(struct msm_gpu *gpu);
struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
+bool adreno_has_gpu(struct device_node *node);
void __init adreno_register(void);
void __exit adreno_unregister(void);
@@ -705,6 +700,8 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
mutex_unlock(&gpu->lock);
}
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info);
+
/*
* Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
* support expanded privileges
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index fd73dcd3f30e..739ce2c283a4 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -345,7 +345,6 @@ static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev
unsigned long iova, int flags, void *arg)
{
struct msm_iommu *iommu = arg;
- struct msm_mmu *mmu = &iommu->base;
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
struct adreno_smmu_fault_info info, *ptr = NULL;
@@ -359,9 +358,6 @@ static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev
pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
- if (mmu->funcs->resume_translation)
- mmu->funcs->resume_translation(mmu);
-
return 0;
}
@@ -376,12 +372,12 @@ static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *de
return -ENOSYS;
}
-static void msm_iommu_resume_translation(struct msm_mmu *mmu)
+static void msm_iommu_set_stall(struct msm_mmu *mmu, bool enable)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
- if (adreno_smmu->resume_translation)
- adreno_smmu->resume_translation(adreno_smmu->cookie, true);
+ if (adreno_smmu->set_stall)
+ adreno_smmu->set_stall(adreno_smmu->cookie, enable);
}
static void msm_iommu_detach(struct msm_mmu *mmu)
@@ -431,7 +427,7 @@ static const struct msm_mmu_funcs funcs = {
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
- .resume_translation = msm_iommu_resume_translation,
+ .set_stall = msm_iommu_set_stall,
};
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index daf91529e02b..0c694907140d 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -15,7 +15,7 @@ struct msm_mmu_funcs {
size_t len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
- void (*resume_translation)(struct msm_mmu *mmu);
+ void (*set_stall)(struct msm_mmu *mmu, bool enable);
};
enum msm_mmu_type {
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 5a6ae9fc3194..462713401622 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -2255,7 +2255,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<reg32 offset="0" name="0">
<bitfield name="CLEAR_ON_CHIP_TS" pos="0" type="boolean"/>
<bitfield name="CLEAR_RESOURCE_TABLE" pos="1" type="boolean"/>
- <bitfield name="CLEAR_GLOBAL_LOCAL_TS" pos="2" type="boolean"/>
+ <bitfield name="CLEAR_BV_BR_COUNTER" pos="2" type="boolean"/>
+ <bitfield name="RESET_GLOBAL_LOCAL_TS" pos="3" type="boolean"/>
</reg32>
</domain>
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
index 3926485bb197..a409404627c7 100644
--- a/drivers/gpu/drm/msm/registers/gen_header.py
+++ b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -11,6 +11,7 @@ import collections
import argparse
import time
import datetime
+import re
class Error(Exception):
def __init__(self, message):
@@ -877,13 +878,14 @@ The rules-ng-ng source files this header was generated from are:
""")
maxlen = 0
for filepath in p.xml_files:
- maxlen = max(maxlen, len(filepath))
+ new_filepath = re.sub("^.+drivers","drivers",filepath)
+ maxlen = max(maxlen, len(new_filepath))
for filepath in p.xml_files:
- pad = " " * (maxlen - len(filepath))
+ new_filepath = re.sub("^.+drivers","drivers",filepath)
+ pad = " " * (maxlen - len(new_filepath))
filesize = str(os.path.getsize(filepath))
filesize = " " * (7 - len(filesize)) + filesize
filetime = time.ctime(os.path.getmtime(filepath))
- print("- " + filepath + pad + " (" + filesize + " bytes, from " + filetime + ")")
+ print("- " + new_filepath + pad + " (" + filesize + " bytes, from <stripped>)")
if p.copyright_year:
current_year = str(datetime.date.today().year)
print()
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index d47442125fa1..9aae26eb7d8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -42,7 +42,7 @@
#include "nouveau_acpi.h"
static struct ida bl_ida;
-#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0'
+#define BL_NAME_SIZE 24 // 12 for name + 11 for digits + 1 for '\0'
static bool
nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE],
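The BL_NAME_SIZE bump is worst-case arithmetic: the ida can return any non-negative int, INT_MAX has 10 decimal digits (11 is used for extra headroom), and the base name "nv_backlight" is 12 characters, so 12 + 11 + 1 byte for the terminating NUL gives 24. A quick standalone check of the bound (assuming the "nv_backlight%d" naming used here):

#include <limits.h>
#include <stdio.h>

#define BL_NAME_SIZE 24		/* 12 for name + 11 for digits + 1 for '\0' */

int main(void)
{
	char name[BL_NAME_SIZE];
	int n = snprintf(name, sizeof(name), "nv_backlight%d", INT_MAX);

	/* 12 + 10 = 22 characters, comfortably inside the 24-byte buffer */
	printf("\"%s\" uses %d characters plus the NUL\n", name, n);
	return 0;
}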
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
index 5acb98d137bd..9d06ff722fea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
@@ -637,12 +637,18 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
if (payload_size > max_payload_size) {
const u32 fn = rpc->function;
u32 remain_payload_size = payload_size;
+ void *next;
- /* Adjust length, and send initial RPC. */
- rpc->length = sizeof(*rpc) + max_payload_size;
- msg->checksum = rpc->length;
+ /* Send initial RPC. */
+ next = r535_gsp_rpc_get(gsp, fn, max_payload_size);
+ if (IS_ERR(next)) {
+ repv = next;
+ goto done;
+ }
- repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
+ memcpy(next, payload, max_payload_size);
+
+ repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0);
if (IS_ERR(repv))
goto done;
@@ -653,7 +659,6 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
while (remain_payload_size) {
u32 size = min(remain_payload_size,
max_payload_size);
- void *next;
next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
if (IS_ERR(next)) {
@@ -674,6 +679,8 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload,
/* Wait for reply. */
repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size +
sizeof(*rpc));
+ if (!IS_ERR(repv))
+ kvfree(msg);
} else {
repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len);
}
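The r535 RPC rework sends an oversized payload as a sequence of freshly allocated chunks (the first via the normal RPC path, the rest as CONTINUATION_RECORDs) instead of rewriting the caller's buffer header in place, and frees the scratch message once the reply has been consumed. Reduced to its core, the loop is plain chunking; a generic sketch under the assumption of placeholder alloc/send callbacks (not the nvkm API):

#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

static int send_chunked(const void *payload, u32 len, u32 max_chunk,
			void *(*alloc_chunk)(u32 size),
			int (*send_chunk)(void *chunk, u32 size))
{
	const u8 *src = payload;

	while (len) {
		u32 size = min(len, max_chunk);
		void *chunk = alloc_chunk(size);
		int ret;

		if (!chunk)
			return -ENOMEM;

		memcpy(chunk, src, size);	/* never mutate the caller's buffer */

		ret = send_chunk(chunk, size);
		if (ret)
			return ret;

		src += size;
		len -= size;
	}

	return 0;
}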
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
index 52f2e5f14517..f25ea610cd99 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c
@@ -121,7 +121,7 @@ r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external)
page_shift -= desc->bits;
ctrl->levels[i].physAddress = pd->pt[0]->addr;
- ctrl->levels[i].size = (1 << desc->bits) * desc->size;
+ ctrl->levels[i].size = BIT_ULL(desc->bits) * desc->size;
ctrl->levels[i].aperture = 1;
ctrl->levels[i].pageShift = page_shift;
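The BIT_ULL() one-liner guards against a potential 32-bit overflow: 1 << desc->bits is evaluated as a plain int, so the product with desc->size can wrap before it is widened into the 64-bit size field, whereas BIT_ULL() keeps the whole expression in u64 arithmetic. A tiny standalone demonstration with illustrative numbers:

#include <inttypes.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	unsigned int bits = 21, size = 4096;	/* illustrative, not the nvkm values */

	/* 32-bit arithmetic wraps: (1 << 21) * 4096 = 2^33, truncated to 0 */
	uint64_t bad  = (uint64_t)((1 << bits) * size);
	/* 64-bit arithmetic keeps the full value: 8589934592 */
	uint64_t good = BIT_ULL(bits) * size;

	printf("bad = %" PRIu64 ", good = %" PRIu64 "\n", bad, good);
	return 0;
}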
diff --git a/drivers/gpu/drm/sitronix/Kconfig b/drivers/gpu/drm/sitronix/Kconfig
index c069d0d41775..741d1bb4b83f 100644
--- a/drivers/gpu/drm/sitronix/Kconfig
+++ b/drivers/gpu/drm/sitronix/Kconfig
@@ -5,6 +5,7 @@ config DRM_ST7571_I2C
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
+ select VIDEOMODE_HELPERS
help
DRM driver for Sitronix ST7571 panels controlled over I2C.
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index dd2006d51c7a..eec43d1a5595 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -974,7 +974,7 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
{
- unsigned int columns = DIV_ROUND_UP(ssd130x->height, SSD132X_SEGMENT_WIDTH);
+ unsigned int columns = DIV_ROUND_UP(ssd130x->width, SSD132X_SEGMENT_WIDTH);
unsigned int height = ssd130x->height;
memset(data_array, 0, columns * height);
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 35f131a46d07..42df9d3567e7 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -199,7 +199,6 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
struct v3d_dev *v3d = job->v3d;
struct v3d_file_priv *file = job->file->driver_priv;
struct v3d_stats *global_stats = &v3d->queue[queue].stats;
- struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
unsigned long flags;
@@ -209,7 +208,12 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
else
preempt_disable();
- v3d_stats_update(local_stats, now);
+ /* Don't update the local stats if the file context has already closed */
+ if (file)
+ v3d_stats_update(&file->stats[queue], now);
+ else
+ drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n");
+
v3d_stats_update(global_stats, now);
if (IS_ENABLED(CONFIG_LOCKDEP))
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index a29a6ef266f9..163d092bd973 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -560,12 +560,6 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
if (ret)
return ret;
- ret = drm_connector_hdmi_audio_init(connector, dev->dev,
- &vc4_hdmi_audio_funcs,
- 8, false, -1);
- if (ret)
- return ret;
-
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
@@ -2291,6 +2285,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
return ret;
}
+ ret = drm_connector_hdmi_audio_init(&vc4_hdmi->connector, dev,
+ &vc4_hdmi_audio_funcs, 8, false,
+ -1);
+ if (ret)
+ return ret;
+
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 68f064f33d4b..9f4ade25787a 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -104,6 +104,8 @@ int xe_display_create(struct xe_device *xe)
spin_lock_init(&xe->display.fb_tracking.lock);
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
+ if (!xe->display.hotplug.dp_wq)
+ return -ENOMEM;
return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
}
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index f95375451e2f..9f941fc2e36b 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -17,10 +17,7 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
- struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
-
iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
- xe_device_l2_flush(xe);
}
u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
@@ -30,12 +27,9 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
- struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
-
WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
- xe_device_l2_flush(xe);
}
bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
@@ -74,9 +68,12 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
+ struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+
/*
* The memory barrier here is to ensure coherency of DSB vs MMIO,
* both for weak ordering archs and discrete cards.
*/
- xe_device_wmb(dsb_buf->vma->bo->tile->xe);
+ xe_device_wmb(xe);
+ xe_device_l2_flush(xe);
}
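The xe_dsb_buffer hunks move the L2 flush out of the per-dword write/memset helpers and into intel_dsb_buffer_flush_map(), the usual "write many, flush once" batching: individual stores stay cheap and a single barrier plus cache flush publishes the whole command buffer before the hardware consumes it. A rough sketch of the shape of that pattern (the publish and cache_flush hooks are placeholders, not xe functions):

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct cmd_buf {
	uint32_t *vaddr;
	size_t    ndw;
};

/* Hot path: a plain store, no per-write flush. */
static inline void cmd_buf_write(struct cmd_buf *b, size_t idx, uint32_t val)
{
	b->vaddr[idx] = val;
}

/* Cold path: one barrier plus one device cache flush publishes everything. */
static void cmd_buf_publish(struct cmd_buf *b, void (*cache_flush)(void))
{
	(void)b;					/* kept for symmetry with the driver API */
	atomic_thread_fence(memory_order_release);	/* stands in for xe_device_wmb() */
	cache_flush();					/* stands in for xe_device_l2_flush() */
}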
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index d918ae1c8061..55259969480b 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -164,6 +164,9 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
vma->dpt = dpt;
vma->node = dpt->ggtt_node[tile0->id];
+
+ /* Ensure DPT writes are flushed */
+ xe_device_l2_flush(xe);
return 0;
}
@@ -333,8 +336,6 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
if (ret)
goto err_unpin;
- /* Ensure DPT writes are flushed */
- xe_device_l2_flush(xe);
return vma;
err_unpin:
diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
index 5394a1373a6b..ef2bf984723f 100644
--- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
@@ -40,6 +40,7 @@
#define PCU_CR_PACKAGE_RAPL_LIMIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
#define PWR_LIM_VAL REG_GENMASK(14, 0)
#define PWR_LIM_EN REG_BIT(15)
+#define PWR_LIM REG_GENMASK(15, 0)
#define PWR_LIM_TIME REG_GENMASK(23, 17)
#define PWR_LIM_TIME_X REG_GENMASK(23, 22)
#define PWR_LIM_TIME_Y REG_GENMASK(21, 17)
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 7062115909f2..2c799958c1e4 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -201,6 +201,13 @@ static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};
+static void dev_fini_ggtt(void *arg)
+{
+ struct xe_ggtt *ggtt = arg;
+
+ drain_workqueue(ggtt->wq);
+}
+
/**
* xe_ggtt_init_early - Early GGTT initialization
* @ggtt: the &xe_ggtt to be initialized
@@ -257,6 +264,10 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
if (err)
return err;
+ err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
+ if (err)
+ return err;
+
if (IS_SRIOV_VF(xe)) {
err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
if (err)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 0e5d243c9451..6c4cb9576fb6 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -118,7 +118,7 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}
- xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
+ xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 084cbdeba8ea..e1362e608146 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -138,6 +138,14 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
int pending_seqno;
/*
 * We can get here before the CTs are even initialized if we're wedging
 * very early, in which case there are no pending fences and we can bail
 * immediately.
+ */
+ if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
+ return;
+
+ /*
* CT channel is already disabled at this point. No new TLB requests can
* appear.
*/
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 2447de0ebedf..bbcbb348256f 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -34,6 +34,11 @@
#include "xe_pm.h"
#include "xe_trace_guc.h"
+static void receive_g2h(struct xe_guc_ct *ct);
+static void g2h_worker_func(struct work_struct *w);
+static void safe_mode_worker_func(struct work_struct *w);
+static void ct_exit_safe_mode(struct xe_guc_ct *ct);
+
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
enum {
/* Internal states, not error conditions */
@@ -186,14 +191,11 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_ct *ct = arg;
+ ct_exit_safe_mode(ct);
destroy_workqueue(ct->g2h_wq);
xa_destroy(&ct->fence_lookup);
}
-static void receive_g2h(struct xe_guc_ct *ct);
-static void g2h_worker_func(struct work_struct *w);
-static void safe_mode_worker_func(struct work_struct *w);
-
static void primelockdep(struct xe_guc_ct *ct)
{
if (!IS_ENABLED(CONFIG_LOCKDEP))
@@ -514,6 +516,9 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct)
*/
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
+ if (!xe_guc_ct_initialized(ct))
+ return;
+
xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
stop_g2h_handler(ct);
}
@@ -760,7 +765,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u16 seqno;
int ret;
- xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, xe_guc_ct_initialized(ct));
xe_gt_assert(gt, !g2h_len || !g2h_fence);
xe_gt_assert(gt, !num_g2h || !g2h_fence);
xe_gt_assert(gt, !g2h_len || num_g2h);
@@ -1344,7 +1349,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
u32 action;
u32 *hxg;
- xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, xe_guc_ct_initialized(ct));
lockdep_assert_held(&ct->fast_lock);
if (ct->state == XE_GUC_CT_STATE_DISABLED)
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 82c4ae458dda..582aac106469 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -22,6 +22,11 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
+static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
+{
+ return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
+}
+
static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
{
return ct->state == XE_GUC_CT_STATE_ENABLED;
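
A sketch of how the new xe_guc_ct_initialized() helper is meant to be used as an early-out guard in paths that can run before CT setup, mirroring the xe_guc_ct_stop() and xe_gt_tlb_invalidation_reset() hunks above (example_early_teardown() is a hypothetical caller, not part of the patch):

	static void example_early_teardown(struct xe_guc_ct *ct)
	{
		if (!xe_guc_ct_initialized(ct))
			return;	/* nothing was set up yet, so nothing to tear down */

		/* ... CT-dependent teardown ... */
	}
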
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 18c623992035..3beaaa7b25c1 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -1068,7 +1068,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
goto out;
}
- memset(pc->bo->vmap.vaddr, 0, size);
+ xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
slpc_shared_data_write(pc, header.size, size);
earlier = ktime_get();
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 6d84a52b660a..9567f6700cf2 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1762,6 +1762,9 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
+ if (!guc->submission_state.initialized)
+ return 0;
+
/*
* Using an atomic here rather than submission_state.lock as this
* function can be called while holding the CT lock (engine reset
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 74f31639b37f..f008e8049700 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -159,8 +159,8 @@ static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 att
return ret;
}
-static int xe_hwmon_pcode_write_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel,
- u32 uval)
+static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel,
+ u32 clr, u32 set)
{
struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
u32 val0, val1;
@@ -179,7 +179,7 @@ static int xe_hwmon_pcode_write_power_limit(const struct xe_hwmon *hwmon, u32 at
channel, val0, val1, ret);
if (attr == PL1_HWMON_ATTR)
- val0 = uval;
+ val0 = (val0 & ~clr) | set;
else
return -EIO;
@@ -339,7 +339,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
if (hwmon->xe->info.has_mbx_power_limits) {
drm_dbg(&hwmon->xe->drm, "disabling %s on channel %d\n",
PWR_ATTR_TO_STR(attr), channel);
- xe_hwmon_pcode_write_power_limit(hwmon, attr, channel, 0);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM_EN, 0);
xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
} else {
reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN, 0);
@@ -370,10 +370,9 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
}
if (hwmon->xe->info.has_mbx_power_limits)
- ret = xe_hwmon_pcode_write_power_limit(hwmon, attr, channel, reg_val);
+ ret = xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM, reg_val);
else
- reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN | PWR_LIM_VAL,
- reg_val);
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM, reg_val);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
return ret;
@@ -563,14 +562,11 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
mutex_lock(&hwmon->hwmon_lock);
- if (hwmon->xe->info.has_mbx_power_limits) {
- ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
- r = (r & ~PWR_LIM_TIME) | rxy;
- xe_hwmon_pcode_write_power_limit(hwmon, power_attr, channel, r);
- } else {
+ if (hwmon->xe->info.has_mbx_power_limits)
+ xe_hwmon_pcode_rmw_power_limit(hwmon, power_attr, channel, PWR_LIM_TIME, rxy);
+ else
r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel),
PWR_LIM_TIME, rxy);
- }
mutex_unlock(&hwmon->hwmon_lock);
@@ -1138,12 +1134,12 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
} else {
drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n");
/* Write default limits to read from pcode from now on. */
- xe_hwmon_pcode_write_power_limit(hwmon, PL1_HWMON_ATTR,
- CHANNEL_CARD,
- hwmon->pl1_on_boot[CHANNEL_CARD]);
- xe_hwmon_pcode_write_power_limit(hwmon, PL1_HWMON_ATTR,
- CHANNEL_PKG,
- hwmon->pl1_on_boot[CHANNEL_PKG]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
+ CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl1_on_boot[CHANNEL_CARD]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
+ CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl1_on_boot[CHANNEL_PKG]);
hwmon->scl_shift_power = PWR_UNIT;
hwmon->scl_shift_energy = ENERGY_UNIT;
hwmon->scl_shift_time = TIME_UNIT;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 63d74e27f54c..bf7c3981897d 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -941,11 +941,18 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
* store it in the PPHSWP.
*/
#define CONTEXT_ACTIVE 1ULL
-static void xe_lrc_setup_utilization(struct xe_lrc *lrc)
+static int xe_lrc_setup_utilization(struct xe_lrc *lrc)
{
- u32 *cmd;
+ u32 *cmd, *buf = NULL;
- cmd = lrc->bb_per_ctx_bo->vmap.vaddr;
+ if (lrc->bb_per_ctx_bo->vmap.is_iomem) {
+ buf = kmalloc(lrc->bb_per_ctx_bo->size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ cmd = buf;
+ } else {
+ cmd = lrc->bb_per_ctx_bo->vmap.vaddr;
+ }
*cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
*cmd++ = ENGINE_ID(0).addr;
@@ -966,9 +973,16 @@ static void xe_lrc_setup_utilization(struct xe_lrc *lrc)
*cmd++ = MI_BATCH_BUFFER_END;
+ if (buf) {
+ xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bb_per_ctx_bo->vmap, 0,
+ buf, (cmd - buf) * sizeof(*cmd));
+ kfree(buf);
+ }
+
xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1);
+ return 0;
}
#define PVC_CTX_ASID (0x2e + 1)
@@ -1125,7 +1139,9 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
map = __xe_lrc_start_seqno_map(lrc);
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
- xe_lrc_setup_utilization(lrc);
+ err = xe_lrc_setup_utilization(lrc);
+ if (err)
+ goto err_lrc_finish;
return 0;
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 6345896585de..f0b167b3fb6a 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -764,7 +764,7 @@ static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
return false;
}
- if (range_size <= SZ_64K && !supports_4K_migration(vm->xe)) {
+ if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
return false;
}
diff --git a/drivers/hid/hid-appletb-kbd.c b/drivers/hid/hid-appletb-kbd.c
index 6f251b284018..2e0caf52af13 100644
--- a/drivers/hid/hid-appletb-kbd.c
+++ b/drivers/hid/hid-appletb-kbd.c
@@ -438,6 +438,8 @@ static int appletb_kbd_probe(struct hid_device *hdev, const struct hid_device_id
return 0;
close_hw:
+ if (kbd->backlight_dev)
+ put_device(&kbd->backlight_dev->dev);
hid_hw_close(hdev);
stop_hw:
hid_hw_stop(hdev);
@@ -453,6 +455,9 @@ static void appletb_kbd_remove(struct hid_device *hdev)
input_unregister_handler(&kbd->inp_handler);
timer_delete_sync(&kbd->inactivity_timer);
+ if (kbd->backlight_dev)
+ put_device(&kbd->backlight_dev->dev);
+
hid_hw_close(hdev);
hid_hw_stop(hdev);
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e3fb4e2fe911..c6468568aea1 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -312,6 +312,8 @@
#define USB_DEVICE_ID_ASUS_AK1D 0x1125
#define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408
#define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421
+#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA 0xb824
+#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2 0xb82c
#define USB_VENDOR_ID_CHUNGHWAT 0x2247
#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
@@ -819,6 +821,7 @@
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
+#define USB_DEVICE_ID_LENOVO_X1_TAB2 0x60a4
#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
#define USB_DEVICE_ID_LENOVO_X12_TAB2 0x61ae
@@ -1525,4 +1528,7 @@
#define USB_VENDOR_ID_SIGNOTEC 0x2133
#define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011 0x0018
+#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY 0x4c4a
+#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155 0x4155
+
#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 9d80635a91eb..ff1784b5c2a4 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -2343,7 +2343,7 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
}
if (list_empty(&hid->inputs)) {
- hid_err(hid, "No inputs registered, leaving\n");
+ hid_dbg(hid, "No inputs registered, leaving\n");
goto out_unwind;
}
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index af29ba840522..b3121fa7a72d 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -492,6 +492,7 @@ static int lenovo_input_mapping(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_X12_TAB:
case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max);
default:
@@ -548,11 +549,14 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev)
/*
* Tell the keyboard a driver understands it, and turn F7, F9, F11 into
- * regular keys
+ * regular keys (Compact only)
*/
- ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
- if (ret)
- hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
+ if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD ||
+ hdev->product == USB_DEVICE_ID_LENOVO_CBTKBD) {
+ ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
+ if (ret)
+ hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
+ }
/* Switch middle button to native mode */
ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01);
@@ -605,6 +609,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
if (ret)
@@ -861,6 +866,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
return lenovo_event_tp10ubkbd(hdev, field, usage, value);
default:
@@ -1144,6 +1150,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
break;
@@ -1384,6 +1391,7 @@ static int lenovo_probe(struct hid_device *hdev,
case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_probe_tp10ubkbd(hdev);
break;
@@ -1473,6 +1481,7 @@ static void lenovo_remove(struct hid_device *hdev)
case USB_DEVICE_ID_LENOVO_X12_TAB2:
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
+ case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
lenovo_remove_tp10ubkbd(hdev);
break;
@@ -1524,6 +1533,8 @@ static const struct hid_device_id lenovo_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB2) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index b41001e02da7..a1c54ffe02b4 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2132,12 +2132,18 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
- /* Lenovo X1 TAB Gen 2 */
+ /* Lenovo X1 TAB Gen 1 */
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_LENOVO,
USB_DEVICE_ID_LENOVO_X1_TAB) },
+ /* Lenovo X1 TAB Gen 2 */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X1_TAB2) },
+
/* Lenovo X1 TAB Gen 3 */
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 839d5bcd72b1..fb4985988615 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -308,6 +308,7 @@ enum joycon_ctlr_state {
JOYCON_CTLR_STATE_INIT,
JOYCON_CTLR_STATE_READ,
JOYCON_CTLR_STATE_REMOVED,
+ JOYCON_CTLR_STATE_SUSPENDED,
};
/* Controller type received as part of device info */
@@ -2750,14 +2751,46 @@ static void nintendo_hid_remove(struct hid_device *hdev)
static int nintendo_hid_resume(struct hid_device *hdev)
{
- int ret = joycon_init(hdev);
+ struct joycon_ctlr *ctlr = hid_get_drvdata(hdev);
+ int ret;
+
+ hid_dbg(hdev, "resume\n");
+ if (!joycon_using_usb(ctlr)) {
+ hid_dbg(hdev, "no-op resume for bt ctlr\n");
+ ctlr->ctlr_state = JOYCON_CTLR_STATE_READ;
+ return 0;
+ }
+ ret = joycon_init(hdev);
if (ret)
- hid_err(hdev, "Failed to restore controller after resume");
+ hid_err(hdev,
+ "Failed to restore controller after resume: %d\n",
+ ret);
+ else
+ ctlr->ctlr_state = JOYCON_CTLR_STATE_READ;
return ret;
}
+static int nintendo_hid_suspend(struct hid_device *hdev, pm_message_t message)
+{
+ struct joycon_ctlr *ctlr = hid_get_drvdata(hdev);
+
+ hid_dbg(hdev, "suspend: %d\n", message.event);
+ /*
+ * Avoid any blocking loops in suspend/resume transitions.
+ *
+ * joycon_enforce_subcmd_rate() can result in repeated retries if for
+ * whatever reason the controller stops providing input reports.
+ *
+ * This has been observed with Bluetooth controllers that lose
+ * connectivity prior to suspend (but not long enough to result in
+ * complete disconnection).
+ */
+ ctlr->ctlr_state = JOYCON_CTLR_STATE_SUSPENDED;
+ return 0;
+}
+
#endif
static const struct hid_device_id nintendo_hid_devices[] = {
@@ -2796,6 +2829,7 @@ static struct hid_driver nintendo_hid_driver = {
#ifdef CONFIG_PM
.resume = nintendo_hid_resume,
+ .suspend = nintendo_hid_suspend,
#endif
};
static int __init nintendo_init(void)
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 7fefeb413ec3..31508da93ba2 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -757,6 +757,8 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
@@ -904,6 +906,7 @@ static const struct hid_device_id hid_ignore_list[] = {
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) },
{ }
};
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 07e90d51f073..fa5d68c36313 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -38,6 +38,7 @@
#define PCI_DEVICE_ID_INTEL_ISH_LNL_M 0xA845
#define PCI_DEVICE_ID_INTEL_ISH_PTL_H 0xE345
#define PCI_DEVICE_ID_INTEL_ISH_PTL_P 0xE445
+#define PCI_DEVICE_ID_INTEL_ISH_WCL 0x4D45
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index ff0fc8010072..c57483224db6 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -27,10 +27,12 @@ enum ishtp_driver_data_index {
ISHTP_DRIVER_DATA_NONE,
ISHTP_DRIVER_DATA_LNL_M,
ISHTP_DRIVER_DATA_PTL,
+ ISHTP_DRIVER_DATA_WCL,
};
#define ISH_FW_GEN_LNL_M "lnlm"
#define ISH_FW_GEN_PTL "ptl"
+#define ISH_FW_GEN_WCL "wcl"
#define ISH_FIRMWARE_PATH(gen) "intel/ish/ish_" gen ".bin"
#define ISH_FIRMWARE_PATH_ALL "intel/ish/ish_*.bin"
@@ -42,6 +44,9 @@ static struct ishtp_driver_data ishtp_driver_data[] = {
[ISHTP_DRIVER_DATA_PTL] = {
.fw_generation = ISH_FW_GEN_PTL,
},
+ [ISHTP_DRIVER_DATA_WCL] = {
+ .fw_generation = ISH_FW_GEN_WCL,
+ },
};
static const struct pci_device_id ish_pci_tbl[] = {
@@ -67,9 +72,10 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_MTL_P)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_H)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_S)},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_LNL_M), .driver_data = ISHTP_DRIVER_DATA_LNL_M},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_H), .driver_data = ISHTP_DRIVER_DATA_PTL},
- {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_P), .driver_data = ISHTP_DRIVER_DATA_PTL},
+ {PCI_DEVICE_DATA(INTEL, ISH_LNL_M, ISHTP_DRIVER_DATA_LNL_M)},
+ {PCI_DEVICE_DATA(INTEL, ISH_PTL_H, ISHTP_DRIVER_DATA_PTL)},
+ {PCI_DEVICE_DATA(INTEL, ISH_PTL_P, ISHTP_DRIVER_DATA_PTL)},
+ {PCI_DEVICE_DATA(INTEL, ISH_WCL, ISHTP_DRIVER_DATA_WCL)},
{}
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c
index f493df0d5dc4..a63f8c833252 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-protocol.c
@@ -4,6 +4,7 @@
#include <linux/bitfield.h>
#include <linux/hid.h>
#include <linux/hid-over-i2c.h>
+#include <linux/unaligned.h>
#include "intel-thc-dev.h"
#include "intel-thc-dma.h"
@@ -200,6 +201,9 @@ int quicki2c_set_report(struct quicki2c_device *qcdev, u8 report_type,
int quicki2c_reset(struct quicki2c_device *qcdev)
{
+ u16 input_reg = le16_to_cpu(qcdev->dev_desc.input_reg);
+ size_t read_len = HIDI2C_LENGTH_LEN;
+ u32 prd_len = read_len;
int ret;
qcdev->reset_ack = false;
@@ -213,12 +217,32 @@ int quicki2c_reset(struct quicki2c_device *qcdev)
ret = wait_event_interruptible_timeout(qcdev->reset_ack_wq, qcdev->reset_ack,
HIDI2C_RESET_TIMEOUT * HZ);
- if (ret <= 0 || !qcdev->reset_ack) {
+ if (qcdev->reset_ack)
+ return 0;
+
+ /*
+ * Manually read the reset response if it wasn't received, in case the reset
+ * interrupt was missed by the touch device or the THC hardware.
+ */
+ ret = thc_tic_pio_read(qcdev->thc_hw, input_reg, read_len, &prd_len,
+ (u32 *)qcdev->input_buf);
+ if (ret) {
+ dev_err_once(qcdev->dev, "Read Reset Response failed, ret %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Check the response packet length, which is the first 16 bits of the packet.
+ * A length of zero indicates a reset response; any other value does not.
+ */
+ if (get_unaligned_le16(qcdev->input_buf)) {
dev_err_once(qcdev->dev,
"Wait reset response timed out ret:%d timeout:%ds\n",
ret, HIDI2C_RESET_TIMEOUT);
return -ETIMEDOUT;
}
+ qcdev->reset_ack = true;
+
return 0;
}
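
A minimal sketch of the length check added above, assuming the HID over I2C layout in which the first two bytes of every response carry a little-endian length field and a length of zero marks the reset acknowledgement (the variable names match the hunk, the control flow is simplified):

	u16 len = get_unaligned_le16(qcdev->input_buf);

	if (len == 0)
		qcdev->reset_ack = true;	/* reset acknowledged */
	else
		return -ETIMEDOUT;		/* a regular report arrived, not the reset ack */
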
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index eaf099b2efdb..9a57504e51a1 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2048,14 +2048,18 @@ static int wacom_initialize_remotes(struct wacom *wacom)
remote->remote_dir = kobject_create_and_add("wacom_remote",
&wacom->hdev->dev.kobj);
- if (!remote->remote_dir)
+ if (!remote->remote_dir) {
+ kfifo_free(&remote->remote_fifo);
return -ENOMEM;
+ }
error = sysfs_create_files(remote->remote_dir, remote_unpair_attrs);
if (error) {
hid_err(wacom->hdev,
"cannot create sysfs group err: %d\n", error);
+ kfifo_free(&remote->remote_fifo);
+ kobject_put(remote->remote_dir);
return error;
}
@@ -2901,6 +2905,7 @@ static void wacom_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
cancel_delayed_work_sync(&wacom->init_work);
+ cancel_delayed_work_sync(&wacom->aes_battery_work);
cancel_work_sync(&wacom->wireless_work);
cancel_work_sync(&wacom->battery_work);
cancel_work_sync(&wacom->remote_work);
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index a3a07662e491..8aeec16a7a90 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -423,13 +423,16 @@ static int fts_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
case hwmon_pwm:
switch (attr) {
- case hwmon_pwm_auto_channels_temp:
- if (data->fan_source[channel] == FTS_FAN_SOURCE_INVALID)
+ case hwmon_pwm_auto_channels_temp: {
+ u8 fan_source = data->fan_source[channel];
+
+ if (fan_source == FTS_FAN_SOURCE_INVALID || fan_source >= BITS_PER_LONG)
*val = 0;
else
- *val = BIT(data->fan_source[channel]);
+ *val = BIT(fan_source);
return 0;
+ }
default:
break;
}
diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
index 7f38d2696239..f607fe8f7937 100644
--- a/drivers/hwmon/ltc4282.c
+++ b/drivers/hwmon/ltc4282.c
@@ -1512,13 +1512,6 @@ static int ltc4282_setup(struct ltc4282_state *st, struct device *dev)
}
if (device_property_read_bool(dev, "adi,fault-log-enable")) {
- ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL,
- LTC4282_FAULT_LOG_EN_MASK);
- if (ret)
- return ret;
- }
-
- if (device_property_read_bool(dev, "adi,fault-log-enable")) {
ret = regmap_set_bits(st->map, LTC4282_ADC_CTRL, LTC4282_FAULT_LOG_EN_MASK);
if (ret)
return ret;
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 9486db249c64..b3694a4209b9 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -459,12 +459,10 @@ static ssize_t occ_show_power_1(struct device *dev,
return sysfs_emit(buf, "%llu\n", val);
}
-static u64 occ_get_powr_avg(u64 *accum, u32 *samples)
+static u64 occ_get_powr_avg(u64 accum, u32 samples)
{
- u64 divisor = get_unaligned_be32(samples);
-
- return (divisor == 0) ? 0 :
- div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor);
+ return (samples == 0) ? 0 :
+ mul_u64_u32_div(accum, 1000000UL, samples);
}
static ssize_t occ_show_power_2(struct device *dev,
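
occ_get_powr_avg() now takes plain values and scales them with mul_u64_u32_div() from linux/math64.h, which avoids the 64-bit intermediate overflow that a raw accumulator * 1000000ULL multiplication could hit for large accumulators. A worked example with illustrative numbers (not taken from the patch):

	/* 5,000,000 accumulated counts over 4 samples -> 1,250,000,000,000 */
	u64 avg = mul_u64_u32_div(5000000ULL, 1000000UL, 4);
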
@@ -489,8 +487,8 @@ static ssize_t occ_show_power_2(struct device *dev,
get_unaligned_be32(&power->sensor_id),
power->function_id, power->apss_channel);
case 1:
- val = occ_get_powr_avg(&power->accumulator,
- &power->update_tag);
+ val = occ_get_powr_avg(get_unaligned_be64(&power->accumulator),
+ get_unaligned_be32(&power->update_tag));
break;
case 2:
val = (u64)get_unaligned_be32(&power->update_tag) *
@@ -527,8 +525,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
return sysfs_emit(buf, "%u_system\n",
get_unaligned_be32(&power->sensor_id));
case 1:
- val = occ_get_powr_avg(&power->system.accumulator,
- &power->system.update_tag);
+ val = occ_get_powr_avg(get_unaligned_be64(&power->system.accumulator),
+ get_unaligned_be32(&power->system.update_tag));
break;
case 2:
val = (u64)get_unaligned_be32(&power->system.update_tag) *
@@ -541,8 +539,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
return sysfs_emit(buf, "%u_proc\n",
get_unaligned_be32(&power->sensor_id));
case 5:
- val = occ_get_powr_avg(&power->proc.accumulator,
- &power->proc.update_tag);
+ val = occ_get_powr_avg(get_unaligned_be64(&power->proc.accumulator),
+ get_unaligned_be32(&power->proc.update_tag));
break;
case 6:
val = (u64)get_unaligned_be32(&power->proc.update_tag) *
@@ -555,8 +553,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
return sysfs_emit(buf, "%u_vdd\n",
get_unaligned_be32(&power->sensor_id));
case 9:
- val = occ_get_powr_avg(&power->vdd.accumulator,
- &power->vdd.update_tag);
+ val = occ_get_powr_avg(get_unaligned_be64(&power->vdd.accumulator),
+ get_unaligned_be32(&power->vdd.update_tag));
break;
case 10:
val = (u64)get_unaligned_be32(&power->vdd.update_tag) *
@@ -569,8 +567,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
return sysfs_emit(buf, "%u_vdn\n",
get_unaligned_be32(&power->sensor_id));
case 13:
- val = occ_get_powr_avg(&power->vdn.accumulator,
- &power->vdn.update_tag);
+ val = occ_get_powr_avg(get_unaligned_be64(&power->vdn.accumulator),
+ get_unaligned_be32(&power->vdn.update_tag));
break;
case 14:
val = (u64)get_unaligned_be32(&power->vdn.update_tag) *
@@ -747,29 +745,30 @@ static ssize_t occ_show_extended(struct device *dev,
}
/*
- * Some helper macros to make it easier to define an occ_attribute. Since these
- * are dynamically allocated, we shouldn't use the existing kernel macros which
+ * A helper to make it easier to define an occ_attribute. Since these
+ * are dynamically allocated, we cannot use the existing kernel macros which
* stringify the name argument.
*/
-#define ATTR_OCC(_name, _mode, _show, _store) { \
- .attr = { \
- .name = _name, \
- .mode = VERIFY_OCTAL_PERMISSIONS(_mode), \
- }, \
- .show = _show, \
- .store = _store, \
-}
-
-#define SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index) { \
- .dev_attr = ATTR_OCC(_name, _mode, _show, _store), \
- .index = _index, \
- .nr = _nr, \
+static void occ_init_attribute(struct occ_attribute *attr, int mode,
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf),
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count),
+ int nr, int index, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(attr->name, sizeof(attr->name), fmt, args);
+ va_end(args);
+
+ attr->sensor.dev_attr.attr.name = attr->name;
+ attr->sensor.dev_attr.attr.mode = mode;
+ attr->sensor.dev_attr.show = show;
+ attr->sensor.dev_attr.store = store;
+ attr->sensor.index = index;
+ attr->sensor.nr = nr;
}
-#define OCC_INIT_ATTR(_name, _mode, _show, _store, _nr, _index) \
- ((struct sensor_device_attribute_2) \
- SENSOR_ATTR_OCC(_name, _mode, _show, _store, _nr, _index))
-
/*
 * Allocate and instantiate sensor_device_attribute_2s. It's most efficient to
* use our own instead of the built-in hwmon attribute types.
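
Usage sketch of the new occ_init_attribute() helper defined above, mirroring the loops below (the literal index values are illustrative):

	/* builds "temp1_input": show_temp callback, nr = 1, index = 0 */
	occ_init_attribute(attr, 0444, show_temp, NULL, 1, 0, "temp%d_input", 1);
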
@@ -855,14 +854,15 @@ static int occ_setup_sensor_attrs(struct occ *occ)
sensors->extended.num_sensors = 0;
}
- occ->attrs = devm_kzalloc(dev, sizeof(*occ->attrs) * num_attrs,
+ occ->attrs = devm_kcalloc(dev, num_attrs, sizeof(*occ->attrs),
GFP_KERNEL);
if (!occ->attrs)
return -ENOMEM;
/* null-terminated list */
- occ->group.attrs = devm_kzalloc(dev, sizeof(*occ->group.attrs) *
- num_attrs + 1, GFP_KERNEL);
+ occ->group.attrs = devm_kcalloc(dev, num_attrs + 1,
+ sizeof(*occ->group.attrs),
+ GFP_KERNEL);
if (!occ->group.attrs)
return -ENOMEM;
@@ -872,43 +872,33 @@ static int occ_setup_sensor_attrs(struct occ *occ)
s = i + 1;
temp = ((struct temp_sensor_2 *)sensors->temp.data) + i;
- snprintf(attr->name, sizeof(attr->name), "temp%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL,
- 0, i);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 0, i, "temp%d_label", s);
attr++;
if (sensors->temp.version == 2 &&
temp->fru_type == OCC_FRU_TYPE_VRM) {
- snprintf(attr->name, sizeof(attr->name),
- "temp%d_alarm", s);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 1, i, "temp%d_alarm", s);
} else {
- snprintf(attr->name, sizeof(attr->name),
- "temp%d_input", s);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 1, i, "temp%d_input", s);
}
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_temp, NULL,
- 1, i);
attr++;
if (sensors->temp.version > 1) {
- snprintf(attr->name, sizeof(attr->name),
- "temp%d_fru_type", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_temp, NULL, 2, i);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 2, i, "temp%d_fru_type", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "temp%d_fault", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_temp, NULL, 3, i);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 3, i, "temp%d_fault", s);
attr++;
if (sensors->temp.version == 0x10) {
- snprintf(attr->name, sizeof(attr->name),
- "temp%d_max", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_temp, NULL,
- 4, i);
+ occ_init_attribute(attr, 0444, show_temp, NULL,
+ 4, i, "temp%d_max", s);
attr++;
}
}
@@ -917,14 +907,12 @@ static int occ_setup_sensor_attrs(struct occ *occ)
for (i = 0; i < sensors->freq.num_sensors; ++i) {
s = i + 1;
- snprintf(attr->name, sizeof(attr->name), "freq%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL,
- 0, i);
+ occ_init_attribute(attr, 0444, show_freq, NULL,
+ 0, i, "freq%d_label", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "freq%d_input", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_freq, NULL,
- 1, i);
+ occ_init_attribute(attr, 0444, show_freq, NULL,
+ 1, i, "freq%d_input", s);
attr++;
}
@@ -940,32 +928,24 @@ static int occ_setup_sensor_attrs(struct occ *occ)
s = (i * 4) + 1;
for (j = 0; j < 4; ++j) {
- snprintf(attr->name, sizeof(attr->name),
- "power%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL,
- nr++, i);
+ occ_init_attribute(attr, 0444, show_power,
+ NULL, nr++, i,
+ "power%d_label", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_average", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL,
- nr++, i);
+ occ_init_attribute(attr, 0444, show_power,
+ NULL, nr++, i,
+ "power%d_average", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_average_interval", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL,
- nr++, i);
+ occ_init_attribute(attr, 0444, show_power,
+ NULL, nr++, i,
+ "power%d_average_interval", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_input", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL,
- nr++, i);
+ occ_init_attribute(attr, 0444, show_power,
+ NULL, nr++, i,
+ "power%d_input", s);
attr++;
s++;
@@ -977,28 +957,20 @@ static int occ_setup_sensor_attrs(struct occ *occ)
for (i = 0; i < sensors->power.num_sensors; ++i) {
s = i + 1;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL, 0, i);
+ occ_init_attribute(attr, 0444, show_power, NULL,
+ 0, i, "power%d_label", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_average", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL, 1, i);
+ occ_init_attribute(attr, 0444, show_power, NULL,
+ 1, i, "power%d_average", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_average_interval", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL, 2, i);
+ occ_init_attribute(attr, 0444, show_power, NULL,
+ 2, i, "power%d_average_interval", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_input", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_power, NULL, 3, i);
+ occ_init_attribute(attr, 0444, show_power, NULL,
+ 3, i, "power%d_input", s);
attr++;
}
@@ -1006,56 +978,43 @@ static int occ_setup_sensor_attrs(struct occ *occ)
}
if (sensors->caps.num_sensors >= 1) {
- snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 0, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 0, 0, "power%d_label", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "power%d_cap", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 1, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 1, 0, "power%d_cap", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "power%d_input", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 2, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 2, 0, "power%d_input", s);
attr++;
- snprintf(attr->name, sizeof(attr->name),
- "power%d_cap_not_redundant", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 3, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 3, 0, "power%d_cap_not_redundant", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "power%d_cap_max", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 4, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 4, 0, "power%d_cap_max", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "power%d_cap_min", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
- 5, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 5, 0, "power%d_cap_min", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "power%d_cap_user",
- s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0644, show_caps,
- occ_store_caps_user, 6, 0);
+ occ_init_attribute(attr, 0644, show_caps, occ_store_caps_user,
+ 6, 0, "power%d_cap_user", s);
attr++;
if (sensors->caps.version > 1) {
- snprintf(attr->name, sizeof(attr->name),
- "power%d_cap_user_source", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_caps, NULL, 7, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 7, 0, "power%d_cap_user_source", s);
attr++;
if (sensors->caps.version > 2) {
- snprintf(attr->name, sizeof(attr->name),
- "power%d_cap_min_soft", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- show_caps, NULL,
- 8, 0);
+ occ_init_attribute(attr, 0444, show_caps, NULL,
+ 8, 0,
+ "power%d_cap_min_soft", s);
attr++;
}
}
@@ -1064,19 +1023,16 @@ static int occ_setup_sensor_attrs(struct occ *occ)
for (i = 0; i < sensors->extended.num_sensors; ++i) {
s = i + 1;
- snprintf(attr->name, sizeof(attr->name), "extn%d_label", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- occ_show_extended, NULL, 0, i);
+ occ_init_attribute(attr, 0444, occ_show_extended, NULL,
+ 0, i, "extn%d_label", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "extn%d_flags", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- occ_show_extended, NULL, 1, i);
+ occ_init_attribute(attr, 0444, occ_show_extended, NULL,
+ 1, i, "extn%d_flags", s);
attr++;
- snprintf(attr->name, sizeof(attr->name), "extn%d_input", s);
- attr->sensor = OCC_INIT_ATTR(attr->name, 0444,
- occ_show_extended, NULL, 2, i);
+ occ_init_attribute(attr, 0444, occ_show_extended, NULL,
+ 2, i, "extn%d_input", s);
attr++;
}
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index eddf25b90ca8..6544d27e4419 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -619,8 +619,8 @@ static u32 bit_func(struct i2c_adapter *adap)
/* -----exported algorithm data: ------------------------------------- */
const struct i2c_algorithm i2c_bit_algo = {
- .master_xfer = bit_xfer,
- .master_xfer_atomic = bit_xfer_atomic,
+ .xfer = bit_xfer,
+ .xfer_atomic = bit_xfer_atomic,
.functionality = bit_func,
};
EXPORT_SYMBOL(i2c_bit_algo);
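
Most of the i2c hunks that follow apply the same mechanical rename of struct i2c_algorithm callbacks from the legacy master_* names to the neutral ones. A generic sketch of the resulting convention (the example_* identifiers are hypothetical):

	static const struct i2c_algorithm example_algo = {
		.xfer		= example_xfer,		/* was .master_xfer */
		.xfer_atomic	= example_xfer_atomic,	/* was .master_xfer_atomic */
		.functionality	= example_func,
	};
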
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 384af88e58ad..74b66aec33d4 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -361,8 +361,8 @@ static u32 pca_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm pca_algo = {
- .master_xfer = pca_xfer,
- .functionality = pca_func,
+ .xfer = pca_xfer,
+ .functionality = pca_func,
};
static unsigned int pca_probe_chip(struct i2c_adapter *adap)
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 740066ceaea3..fd563e845d4b 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -389,8 +389,8 @@ static u32 pcf_func(struct i2c_adapter *adap)
/* exported algorithm data: */
static const struct i2c_algorithm pcf_algo = {
- .master_xfer = pcf_xfer,
- .functionality = pcf_func,
+ .xfer = pcf_xfer,
+ .functionality = pcf_func,
};
/*
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 48c5ab832009..0a4ecccd1851 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1530,7 +1530,7 @@ config I2C_XGENE_SLIMPRO
config SCx200_ACB
tristate "Geode ACCESS.bus support"
- depends on X86_32 && PCI
+ depends on X86_32 && PCI && HAS_IOPORT
help
Enable the use of the ACCESS.bus controllers on the Geode SCx200 and
SC1100 processors and the CS5535 and CS5536 Geode companion devices.
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index d9dd0e475d1a..188e24cc4d35 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -179,7 +179,7 @@ static u32 i2c_amd_func(struct i2c_adapter *a)
}
static const struct i2c_algorithm i2c_amd_algorithm = {
- .master_xfer = i2c_amd_xfer,
+ .xfer = i2c_amd_xfer,
.functionality = i2c_amd_func,
};
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 1550d3d552ae..a26b74c71206 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -814,11 +814,11 @@ static int aspeed_i2c_unreg_slave(struct i2c_client *client)
#endif /* CONFIG_I2C_SLAVE */
static const struct i2c_algorithm aspeed_i2c_algo = {
- .master_xfer = aspeed_i2c_master_xfer,
- .functionality = aspeed_i2c_functionality,
+ .xfer = aspeed_i2c_master_xfer,
+ .functionality = aspeed_i2c_functionality,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
- .reg_slave = aspeed_i2c_reg_slave,
- .unreg_slave = aspeed_i2c_unreg_slave,
+ .reg_slave = aspeed_i2c_reg_slave,
+ .unreg_slave = aspeed_i2c_unreg_slave,
#endif /* CONFIG_I2C_SLAVE */
};
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index 374fc50bb205..59795c1c24ff 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -739,8 +739,8 @@ static u32 at91_twi_func(struct i2c_adapter *adapter)
}
static const struct i2c_algorithm at91_twi_algorithm = {
- .master_xfer = at91_twi_xfer,
- .functionality = at91_twi_func,
+ .xfer = at91_twi_xfer,
+ .functionality = at91_twi_func,
};
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 50030256cd85..0555eeb6903a 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -706,7 +706,7 @@ static int axxia_i2c_unreg_slave(struct i2c_client *slave)
}
static const struct i2c_algorithm axxia_i2c_algo = {
- .master_xfer = axxia_i2c_xfer,
+ .xfer = axxia_i2c_xfer,
.functionality = axxia_i2c_func,
.reg_slave = axxia_i2c_reg_slave,
.unreg_slave = axxia_i2c_unreg_slave,
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 63bc3c8f49d3..e418a4f23f15 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -1041,7 +1041,7 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
}
static struct i2c_algorithm bcm_iproc_algo = {
- .master_xfer = bcm_iproc_i2c_xfer,
+ .xfer = bcm_iproc_i2c_xfer,
.functionality = bcm_iproc_i2c_functionality,
.reg_slave = bcm_iproc_i2c_reg_slave,
.unreg_slave = bcm_iproc_i2c_unreg_slave,
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 8df63aaf2a80..697d095afbe4 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -1231,12 +1231,12 @@ static int cdns_unreg_slave(struct i2c_client *slave)
#endif
static const struct i2c_algorithm cdns_i2c_algo = {
- .master_xfer = cdns_i2c_master_xfer,
- .master_xfer_atomic = cdns_i2c_master_xfer_atomic,
- .functionality = cdns_i2c_func,
+ .xfer = cdns_i2c_master_xfer,
+ .xfer_atomic = cdns_i2c_master_xfer_atomic,
+ .functionality = cdns_i2c_func,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
- .reg_slave = cdns_reg_slave,
- .unreg_slave = cdns_unreg_slave,
+ .reg_slave = cdns_reg_slave,
+ .unreg_slave = cdns_unreg_slave,
#endif
};
diff --git a/drivers/i2c/busses/i2c-cgbc.c b/drivers/i2c/busses/i2c-cgbc.c
index f054d167ac47..25a74fa51aa0 100644
--- a/drivers/i2c/busses/i2c-cgbc.c
+++ b/drivers/i2c/busses/i2c-cgbc.c
@@ -331,8 +331,8 @@ static u32 cgbc_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm cgbc_i2c_algorithm = {
- .master_xfer = cgbc_i2c_xfer,
- .functionality = cgbc_i2c_func,
+ .xfer = cgbc_i2c_xfer,
+ .functionality = cgbc_i2c_func,
};
static struct i2c_algo_cgbc_data cgbc_i2c_algo_data[] = {
diff --git a/drivers/i2c/busses/i2c-designware-amdisp.c b/drivers/i2c/busses/i2c-designware-amdisp.c
index ad6f08338124..450793d5f839 100644
--- a/drivers/i2c/busses/i2c-designware-amdisp.c
+++ b/drivers/i2c/busses/i2c-designware-amdisp.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/soc/amd/isp4_misc.h>
#include "i2c-designware-core.h"
@@ -62,6 +63,7 @@ static int amd_isp_dw_i2c_plat_probe(struct platform_device *pdev)
adap = &isp_i2c_dev->adapter;
adap->owner = THIS_MODULE;
+ scnprintf(adap->name, sizeof(adap->name), AMDISP_I2C_ADAP_NAME);
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
/* use dynamically allocated adapter id */
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index c5394229b77f..9d7d9e47564a 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -1042,8 +1042,9 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
if (ret)
return ret;
- snprintf(adap->name, sizeof(adap->name),
- "Synopsys DesignWare I2C adapter");
+ if (!adap->name[0])
+ scnprintf(adap->name, sizeof(adap->name),
+ "Synopsys DesignWare I2C adapter");
adap->retries = 3;
adap->algo = &i2c_dw_algo;
adap->quirks = &i2c_dw_quirks;
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index efdaddf99f9e..27ea3c130a16 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -690,7 +690,7 @@ static u32 pch_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm pch_algorithm = {
- .master_xfer = pch_i2c_xfer,
+ .xfer = pch_i2c_xfer,
.functionality = pch_i2c_func
};
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 2512cef8e2a2..ece019b3d066 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -351,10 +351,10 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
}
static const struct i2c_algorithm em_i2c_algo = {
- .master_xfer = em_i2c_xfer,
+ .xfer = em_i2c_xfer,
.functionality = em_i2c_func,
- .reg_slave = em_i2c_reg_slave,
- .unreg_slave = em_i2c_unreg_slave,
+ .reg_slave = em_i2c_reg_slave,
+ .unreg_slave = em_i2c_unreg_slave,
};
static int em_i2c_probe(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 02f24479aa07..9c1c5f3c09f6 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -879,9 +879,9 @@ static u32 exynos5_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm exynos5_i2c_algorithm = {
- .master_xfer = exynos5_i2c_xfer,
- .master_xfer_atomic = exynos5_i2c_xfer_atomic,
- .functionality = exynos5_i2c_func,
+ .xfer = exynos5_i2c_xfer,
+ .xfer_atomic = exynos5_i2c_xfer_atomic,
+ .functionality = exynos5_i2c_func,
};
static int exynos5_i2c_probe(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-gxp.c b/drivers/i2c/busses/i2c-gxp.c
index 0fc39caa6c87..2d117e7e3cb6 100644
--- a/drivers/i2c/busses/i2c-gxp.c
+++ b/drivers/i2c/busses/i2c-gxp.c
@@ -184,11 +184,11 @@ static int gxp_i2c_unreg_slave(struct i2c_client *slave)
#endif
static const struct i2c_algorithm gxp_i2c_algo = {
- .master_xfer = gxp_i2c_master_xfer,
+ .xfer = gxp_i2c_master_xfer,
.functionality = gxp_i2c_func,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
- .reg_slave = gxp_i2c_reg_slave,
- .unreg_slave = gxp_i2c_unreg_slave,
+ .reg_slave = gxp_i2c_reg_slave,
+ .unreg_slave = gxp_i2c_unreg_slave,
#endif
};
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 3278707bb885..a454f9f25146 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1143,7 +1143,7 @@ static u32 img_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm img_i2c_algo = {
- .master_xfer = img_i2c_xfer,
+ .xfer = img_i2c_xfer,
.functionality = img_i2c_func,
};
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 342d47e67586..064bc83840a6 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -1268,10 +1268,10 @@ static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
}
static const struct i2c_algorithm lpi2c_imx_algo = {
- .master_xfer = lpi2c_imx_xfer,
- .functionality = lpi2c_imx_func,
- .reg_target = lpi2c_imx_register_target,
- .unreg_target = lpi2c_imx_unregister_target,
+ .xfer = lpi2c_imx_xfer,
+ .functionality = lpi2c_imx_func,
+ .reg_target = lpi2c_imx_register_target,
+ .unreg_target = lpi2c_imx_unregister_target,
};
static const struct of_device_id lpi2c_imx_of_match[] = {
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index de01dfecb16e..205cc132fdec 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1008,7 +1008,7 @@ static inline int i2c_imx_isr_read(struct imx_i2c_struct *i2c_imx)
/* setup bus to read data */
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp &= ~I2CR_MTX;
- if (i2c_imx->msg->len - 1)
+ if ((i2c_imx->msg->len - 1) || (i2c_imx->msg->flags & I2C_M_RECV_LEN))
temp &= ~I2CR_TXAK;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
@@ -1063,6 +1063,7 @@ static inline void i2c_imx_isr_read_block_data_len(struct imx_i2c_struct *i2c_im
wake_up(&i2c_imx->queue);
}
i2c_imx->msg->len += len;
+ i2c_imx->msg->buf[i2c_imx->msg_buf_idx++] = len;
}
static irqreturn_t i2c_imx_master_isr(struct imx_i2c_struct *i2c_imx, unsigned int status)
@@ -1692,11 +1693,11 @@ static u32 i2c_imx_func(struct i2c_adapter *adapter)
}
static const struct i2c_algorithm i2c_imx_algo = {
- .master_xfer = i2c_imx_xfer,
- .master_xfer_atomic = i2c_imx_xfer_atomic,
+ .xfer = i2c_imx_xfer,
+ .xfer_atomic = i2c_imx_xfer_atomic,
.functionality = i2c_imx_func,
- .reg_slave = i2c_imx_reg_slave,
- .unreg_slave = i2c_imx_unreg_slave,
+ .reg_slave = i2c_imx_reg_slave,
+ .unreg_slave = i2c_imx_unreg_slave,
};
static int i2c_imx_probe(struct platform_device *pdev)
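
The earlier i2c-imx hunks fix SMBus block reads (I2C_M_RECV_LEN): the first received byte is the block length, which is now stored into the message buffer and is no longer NAKed prematurely. An illustrative message setup for such a transfer (the client address is hypothetical):

	struct i2c_msg msg = {
		.addr	= 0x50,				/* hypothetical client */
		.flags	= I2C_M_RD | I2C_M_RECV_LEN,
		.len	= 1,				/* the length byte comes first */
		.buf	= buf,				/* buf[0] receives the byte count */
	};
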
diff --git a/drivers/i2c/busses/i2c-k1.c b/drivers/i2c/busses/i2c-k1.c
index 5965b4cf6220..b68a21fff0b5 100644
--- a/drivers/i2c/busses/i2c-k1.c
+++ b/drivers/i2c/busses/i2c-k1.c
@@ -477,7 +477,7 @@ static int spacemit_i2c_xfer(struct i2c_adapter *adapt, struct i2c_msg *msgs, in
ret = spacemit_i2c_wait_bus_idle(i2c);
if (!ret)
- spacemit_i2c_xfer_msg(i2c);
+ ret = spacemit_i2c_xfer_msg(i2c);
else if (ret < 0)
dev_dbg(i2c->dev, "i2c transfer error: %d\n", ret);
else
diff --git a/drivers/i2c/busses/i2c-keba.c b/drivers/i2c/busses/i2c-keba.c
index 7b9ed2592f5b..9420c8b342b5 100644
--- a/drivers/i2c/busses/i2c-keba.c
+++ b/drivers/i2c/busses/i2c-keba.c
@@ -500,7 +500,7 @@ static u32 ki2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm ki2c_algo = {
- .master_xfer = ki2c_xfer,
+ .xfer = ki2c_xfer,
.functionality = ki2c_func,
};
diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
index 5ef136c3ecb1..bc0f1a0c8ee1 100644
--- a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
@@ -1048,7 +1048,7 @@ static u32 pci1xxxx_i2c_get_funcs(struct i2c_adapter *adap)
}
static const struct i2c_algorithm pci1xxxx_i2c_algo = {
- .master_xfer = pci1xxxx_i2c_xfer,
+ .xfer = pci1xxxx_i2c_xfer,
.functionality = pci1xxxx_i2c_get_funcs,
};
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index e1d69537353b..0d9032953e48 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -448,8 +448,8 @@ static u32 meson_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm meson_i2c_algorithm = {
- .master_xfer = meson_i2c_xfer,
- .master_xfer_atomic = meson_i2c_xfer_atomic,
+ .xfer = meson_i2c_xfer,
+ .xfer_atomic = meson_i2c_xfer_atomic,
.functionality = meson_i2c_func,
};
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 492bf4c34722..f173bda1c98c 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -526,7 +526,7 @@ static int mchp_corei2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned
}
static const struct i2c_algorithm mchp_corei2c_algo = {
- .master_xfer = mchp_corei2c_xfer,
+ .xfer = mchp_corei2c_xfer,
.functionality = mchp_corei2c_func,
.smbus_xfer = mchp_corei2c_smbus_xfer,
};
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 5bd342047d59..ab456c3717db 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1342,7 +1342,7 @@ static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
}
static const struct i2c_algorithm mtk_i2c_algorithm = {
- .master_xfer = mtk_i2c_transfer,
+ .xfer = mtk_i2c_transfer,
.functionality = mtk_i2c_functionality,
};
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index ad62d56b2186..08c9091a1e35 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -687,7 +687,7 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
}
static const struct i2c_algorithm mxs_i2c_algo = {
- .master_xfer = mxs_i2c_xfer,
+ .xfer = mxs_i2c_xfer,
.functionality = mxs_i2c_func,
};
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index d2877e4cc28d..19b648fc094d 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -996,8 +996,8 @@ static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap)
}
static const struct i2c_algorithm nmk_i2c_algo = {
- .master_xfer = nmk_i2c_xfer,
- .functionality = nmk_i2c_functionality
+ .xfer = nmk_i2c_xfer,
+ .functionality = nmk_i2c_functionality
};
static void nmk_i2c_of_probe(struct device_node *np,
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 892e2d2988a7..8b7e15240fb0 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2470,11 +2470,11 @@ static const struct i2c_adapter_quirks npcm_i2c_quirks = {
};
static const struct i2c_algorithm npcm_i2c_algo = {
- .master_xfer = npcm_i2c_master_xfer,
+ .xfer = npcm_i2c_master_xfer,
.functionality = npcm_i2c_functionality,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
- .reg_slave = npcm_i2c_reg_slave,
- .unreg_slave = npcm_i2c_unreg_slave,
+ .reg_slave = npcm_i2c_reg_slave,
+ .unreg_slave = npcm_i2c_unreg_slave,
#endif
};
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 876791d20ed5..8b01df3cc8e9 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1201,9 +1201,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
}
static const struct i2c_algorithm omap_i2c_algo = {
- .master_xfer = omap_i2c_xfer_irq,
- .master_xfer_atomic = omap_i2c_xfer_polling,
- .functionality = omap_i2c_func,
+ .xfer = omap_i2c_xfer_irq,
+ .xfer_atomic = omap_i2c_xfer_polling,
+ .functionality = omap_i2c_func,
};
static const struct i2c_adapter_quirks omap_i2c_quirks = {
@@ -1461,13 +1461,13 @@ omap_i2c_probe(struct platform_device *pdev)
if (IS_ERR(mux_state)) {
r = PTR_ERR(mux_state);
dev_dbg(&pdev->dev, "failed to get I2C mux: %d\n", r);
- goto err_disable_pm;
+ goto err_put_pm;
}
omap->mux_state = mux_state;
r = mux_state_select(omap->mux_state);
if (r) {
dev_err(&pdev->dev, "failed to select I2C mux: %d\n", r);
- goto err_disable_pm;
+ goto err_put_pm;
}
}
@@ -1515,6 +1515,9 @@ omap_i2c_probe(struct platform_device *pdev)
err_unuse_clocks:
omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
+ if (omap->mux_state)
+ mux_state_deselect(omap->mux_state);
+err_put_pm:
pm_runtime_dont_use_autosuspend(omap->dev);
pm_runtime_put_sync(omap->dev);
err_disable_pm:
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 9a1af5bbd604..8daa0008bd05 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -580,7 +580,7 @@ static u32 i2c_pnx_func(struct i2c_adapter *adapter)
}
static const struct i2c_algorithm pnx_algorithm = {
- .master_xfer = i2c_pnx_xfer,
+ .xfer = i2c_pnx_xfer,
.functionality = i2c_pnx_func,
};
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 4415a29f749b..968a8b8794da 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1154,11 +1154,11 @@ static u32 i2c_pxa_functionality(struct i2c_adapter *adap)
}
static const struct i2c_algorithm i2c_pxa_algorithm = {
- .master_xfer = i2c_pxa_xfer,
- .functionality = i2c_pxa_functionality,
+ .xfer = i2c_pxa_xfer,
+ .functionality = i2c_pxa_functionality,
#ifdef CONFIG_I2C_PXA_SLAVE
- .reg_slave = i2c_pxa_slave_reg,
- .unreg_slave = i2c_pxa_slave_unreg,
+ .reg_slave = i2c_pxa_slave_reg,
+ .unreg_slave = i2c_pxa_slave_unreg,
#endif
};
@@ -1244,11 +1244,11 @@ static int i2c_pxa_pio_xfer(struct i2c_adapter *adap,
}
static const struct i2c_algorithm i2c_pxa_pio_algorithm = {
- .master_xfer = i2c_pxa_pio_xfer,
- .functionality = i2c_pxa_functionality,
+ .xfer = i2c_pxa_pio_xfer,
+ .functionality = i2c_pxa_functionality,
#ifdef CONFIG_I2C_PXA_SLAVE
- .reg_slave = i2c_pxa_slave_reg,
- .unreg_slave = i2c_pxa_slave_unreg,
+ .reg_slave = i2c_pxa_slave_reg,
+ .unreg_slave = i2c_pxa_slave_unreg,
#endif
};
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index 05b73326afd4..a3afa11a71a1 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -462,8 +462,8 @@ static u32 cci_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm cci_algo = {
- .master_xfer = cci_xfer,
- .functionality = cci_func,
+ .xfer = cci_xfer,
+ .functionality = cci_func,
};
static int cci_enable_clocks(struct cci *cci)
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index ccea575fb783..13889f52b6f7 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -727,8 +727,8 @@ static u32 geni_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm geni_i2c_algo = {
- .master_xfer = geni_i2c_xfer,
- .functionality = geni_i2c_func,
+ .xfer = geni_i2c_xfer,
+ .functionality = geni_i2c_func,
};
#ifdef CONFIG_ACPI
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 3a36d682ed57..6059f585843e 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1634,13 +1634,13 @@ static u32 qup_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm qup_i2c_algo = {
- .master_xfer = qup_i2c_xfer,
- .functionality = qup_i2c_func,
+ .xfer = qup_i2c_xfer,
+ .functionality = qup_i2c_func,
};
static const struct i2c_algorithm qup_i2c_algo_v2 = {
- .master_xfer = qup_i2c_xfer_v2,
- .functionality = qup_i2c_func,
+ .xfer = qup_i2c_xfer_v2,
+ .functionality = qup_i2c_func,
};
/*
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 5693a38da7b5..d51884ab99f4 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -1084,11 +1084,11 @@ static u32 rcar_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm rcar_i2c_algo = {
- .master_xfer = rcar_i2c_master_xfer,
- .master_xfer_atomic = rcar_i2c_master_xfer_atomic,
- .functionality = rcar_i2c_func,
- .reg_slave = rcar_reg_slave,
- .unreg_slave = rcar_unreg_slave,
+ .xfer = rcar_i2c_master_xfer,
+ .xfer_atomic = rcar_i2c_master_xfer_atomic,
+ .functionality = rcar_i2c_func,
+ .reg_slave = rcar_reg_slave,
+ .unreg_slave = rcar_unreg_slave,
};
static const struct i2c_adapter_quirks rcar_i2c_quirks = {
diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
index 80d45079b763..e0a76fb5bc31 100644
--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
+++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
@@ -111,6 +111,11 @@ static u32 osif_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+/* prevent invalid 0-length usb_control_msg */
+static const struct i2c_adapter_quirks osif_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN_READ,
+};
+
static const struct i2c_algorithm osif_algorithm = {
.xfer = osif_xfer,
.functionality = osif_func,
@@ -143,6 +148,7 @@ static int osif_probe(struct usb_interface *interface,
priv->adapter.owner = THIS_MODULE;
priv->adapter.class = I2C_CLASS_HWMON;
+ priv->adapter.quirks = &osif_quirks;
priv->adapter.algo = &osif_algorithm;
priv->adapter.algo_data = priv;
snprintf(priv->adapter.name, sizeof(priv->adapter.name),
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 0f3cf500df68..f4fa4703acbd 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -800,9 +800,9 @@ static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
/* i2c bus registration info */
static const struct i2c_algorithm s3c24xx_i2c_algorithm = {
- .master_xfer = s3c24xx_i2c_xfer,
- .master_xfer_atomic = s3c24xx_i2c_xfer_atomic,
- .functionality = s3c24xx_i2c_func,
+ .xfer = s3c24xx_i2c_xfer,
+ .xfer_atomic = s3c24xx_i2c_xfer_atomic,
+ .functionality = s3c24xx_i2c_func,
};
/*
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 620f12596763..43f33988b98f 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -379,8 +379,8 @@ static u32 sh7760_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm sh7760_i2c_algo = {
- .master_xfer = sh7760_i2c_master_xfer,
- .functionality = sh7760_i2c_func,
+ .xfer = sh7760_i2c_master_xfer,
+ .functionality = sh7760_i2c_func,
};
/* calculate CCR register setting for a desired scl clock. SCL clock is
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index adfcee6c9fdc..dae8967f8749 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -740,8 +740,8 @@ static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter)
static const struct i2c_algorithm sh_mobile_i2c_algorithm = {
.functionality = sh_mobile_i2c_func,
- .master_xfer = sh_mobile_i2c_xfer,
- .master_xfer_atomic = sh_mobile_i2c_xfer_atomic,
+ .xfer = sh_mobile_i2c_xfer,
+ .xfer_atomic = sh_mobile_i2c_xfer_atomic,
};
static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = {
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 973a3a8c6d4a..e4aaeb2262d0 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -2151,8 +2151,8 @@ static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm stm32f7_i2c_algo = {
- .master_xfer = stm32f7_i2c_xfer,
- .master_xfer_atomic = stm32f7_i2c_xfer_atomic,
+ .xfer = stm32f7_i2c_xfer,
+ .xfer_atomic = stm32f7_i2c_xfer_atomic,
.smbus_xfer = stm32f7_i2c_smbus_xfer,
.functionality = stm32f7_i2c_func,
.reg_slave = stm32f7_i2c_reg_slave,
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 31f8d08e32a4..1230f51e1624 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -520,8 +520,8 @@ static u32 synquacer_i2c_functionality(struct i2c_adapter *adap)
}
static const struct i2c_algorithm synquacer_i2c_algo = {
- .master_xfer = synquacer_i2c_xfer,
- .functionality = synquacer_i2c_functionality,
+ .xfer = synquacer_i2c_xfer,
+ .functionality = synquacer_i2c_functionality,
};
static const struct i2c_adapter synquacer_i2c_ops = {
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 049b4d154c23..0862b98007f5 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1440,9 +1440,9 @@ static u32 tegra_i2c_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm tegra_i2c_algo = {
- .master_xfer = tegra_i2c_xfer,
- .master_xfer_atomic = tegra_i2c_xfer_atomic,
- .functionality = tegra_i2c_func,
+ .xfer = tegra_i2c_xfer,
+ .xfer_atomic = tegra_i2c_xfer_atomic,
+ .functionality = tegra_i2c_func,
};
/* payload size is only 12 bit */
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index a18eab0992a1..57dfe5f1a7d9 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -139,6 +139,11 @@ out:
return ret;
}
+/* prevent invalid 0-length usb_control_msg */
+static const struct i2c_adapter_quirks usb_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN_READ,
+};
+
/* This is the actual algorithm we define */
static const struct i2c_algorithm usb_algorithm = {
.xfer = usb_xfer,
@@ -247,6 +252,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
/* setup i2c adapter description */
dev->adapter.owner = THIS_MODULE;
dev->adapter.class = I2C_CLASS_HWMON;
+ dev->adapter.quirks = &usb_quirks;
dev->adapter.algo = &usb_algorithm;
dev->adapter.algo_data = dev;
snprintf(dev->adapter.name, sizeof(dev->adapter.name),
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 6bc1575cea6c..607026c921d6 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -1398,8 +1398,8 @@ static u32 xiic_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm xiic_algorithm = {
- .master_xfer = xiic_xfer,
- .master_xfer_atomic = xiic_xfer_atomic,
+ .xfer = xiic_xfer,
+ .xfer_atomic = xiic_xfer_atomic,
.functionality = xiic_func,
};
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 4d5e49b6321b..ddb1c3e8bc9d 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -452,7 +452,7 @@ static u32 xlp9xx_i2c_functionality(struct i2c_adapter *adapter)
}
static const struct i2c_algorithm xlp9xx_i2c_algo = {
- .master_xfer = xlp9xx_i2c_xfer,
+ .xfer = xlp9xx_i2c_xfer,
.functionality = xlp9xx_i2c_functionality,
};
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index be7d6d41e0b2..dd194476b118 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -738,7 +738,7 @@ struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
atr->flags = flags;
if (parent->algo->master_xfer)
- atr->algo.master_xfer = i2c_atr_master_xfer;
+ atr->algo.xfer = i2c_atr_master_xfer;
if (parent->algo->smbus_xfer)
atr->algo.smbus_xfer = i2c_atr_smbus_xfer;
atr->algo.functionality = i2c_atr_functionality;
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index fda72e8be885..4d8690981a55 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -293,12 +293,12 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
*/
if (parent->algo->master_xfer) {
if (muxc->mux_locked)
- priv->algo.master_xfer = i2c_mux_master_xfer;
+ priv->algo.xfer = i2c_mux_master_xfer;
else
- priv->algo.master_xfer = __i2c_mux_master_xfer;
+ priv->algo.xfer = __i2c_mux_master_xfer;
}
if (parent->algo->master_xfer_atomic)
- priv->algo.master_xfer_atomic = priv->algo.master_xfer;
+ priv->algo.xfer_atomic = priv->algo.master_xfer;
if (parent->algo->smbus_xfer) {
if (muxc->mux_locked)
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 77a740561fd7..f2a1f4744978 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -95,9 +95,9 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
priv->cur_chan = new_chan;
/* Now fill out current adapter structure. cur_chan must be up to date */
- priv->algo.master_xfer = i2c_demux_master_xfer;
+ priv->algo.xfer = i2c_demux_master_xfer;
if (adap->algo->master_xfer_atomic)
- priv->algo.master_xfer_atomic = i2c_demux_master_xfer;
+ priv->algo.xfer_atomic = i2c_demux_master_xfer;
priv->algo.functionality = i2c_demux_functionality;
snprintf(priv->cur_adap.name, sizeof(priv->cur_adap.name),
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 8ccb483204fa..73747d20df85 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -152,8 +152,8 @@ static __always_inline int __intel_idle(struct cpuidle_device *dev,
int index, bool irqoff)
{
struct cpuidle_state *state = &drv->states[index];
- unsigned long eax = flg2MWAIT(state->flags);
- unsigned long ecx = 1*irqoff; /* break on interrupt flag */
+ unsigned int eax = flg2MWAIT(state->flags);
+ unsigned int ecx = 1*irqoff; /* break on interrupt flag */
mwait_idle_with_hints(eax, ecx);
@@ -226,9 +226,9 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- unsigned long ecx = 1; /* break on interrupt flag */
struct cpuidle_state *state = &drv->states[index];
- unsigned long eax = flg2MWAIT(state->flags);
+ unsigned int eax = flg2MWAIT(state->flags);
+ unsigned int ecx = 1; /* break on interrupt flag */
if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
fpu_idle_fpregs();
@@ -2507,6 +2507,8 @@ static int __init intel_idle_init(void)
pr_debug("Local APIC timer is reliable in %s\n",
boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");
+ arch_cpu_rescan_dead_smt_siblings();
+
return 0;
hp_setup_fail:
@@ -2518,7 +2520,7 @@ init_driver_fail:
return retval;
}
-device_initcall(intel_idle_init);
+subsys_initcall_sync(intel_idle_init);
/*
* We are not really modular, but we used to support that. Meaning we also
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 9979a351577f..81cf3c902e81 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -582,8 +582,8 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port,
out_unlock:
mutex_unlock(&table->lock);
if (ret)
- pr_warn("%s: unable to add gid %pI6 error=%d\n",
- __func__, gid->raw, ret);
+ pr_warn_ratelimited("%s: unable to add gid %pI6 error=%d\n",
+ __func__, gid->raw, ret);
return ret;
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index c752ae9fad6c..b1c44ec1a3f3 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -76,6 +76,17 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
end = ALIGN(end, page_size);
if (unlikely(end < page_size))
return -EOVERFLOW;
+ /*
+ * The mmu notifier can be called within reclaim contexts and takes the
+ * umem_mutex. This is rare to trigger in testing, so teach lockdep
+ * about it.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ mutex_lock(&umem_odp->umem_mutex);
+ mutex_unlock(&umem_odp->umem_mutex);
+ fs_reclaim_release(GFP_KERNEL);
+ }
nr_entries = (end - start) >> PAGE_SHIFT;
if (!(nr_entries * PAGE_SIZE / page_size))
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index b847084dcd99..a506fafd2b15 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -398,7 +398,7 @@ static int do_get_hw_stats(struct ib_device *ibdev,
return ret;
/* We don't expose device counters over Vports */
- if (is_mdev_switchdev_mode(dev->mdev) && port_num != 0)
+ if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0)
goto done;
if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
@@ -418,7 +418,7 @@ static int do_get_hw_stats(struct ib_device *ibdev,
*/
goto done;
}
- ret = mlx5_lag_query_cong_counters(dev->mdev,
+ ret = mlx5_lag_query_cong_counters(mdev,
stats->value +
cnts->num_q_counters,
cnts->num_cong_counters,
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 2479da8620ca..843dcd312242 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1958,6 +1958,7 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
/* Level1 is valid for future use, no need to free */
return -ENOMEM;
+ INIT_LIST_HEAD(&obj_event->obj_sub_list);
err = xa_insert(&event->object_ids,
key_level2,
obj_event,
@@ -1966,7 +1967,6 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
kfree(obj_event);
return err;
}
- INIT_LIST_HEAD(&obj_event->obj_sub_list);
}
return 0;
@@ -2669,7 +2669,7 @@ static void devx_wait_async_destroy(struct mlx5_async_cmd *cmd)
void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile)
{
- struct mlx5_async_cmd async_cmd[MAX_ASYNC_CMDS];
+ struct mlx5_async_cmd *async_cmd;
struct ib_ucontext *ucontext = ufile->ucontext;
struct ib_device *device = ucontext->device;
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -2678,6 +2678,10 @@ void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile)
int head = 0;
int tail = 0;
+ async_cmd = kcalloc(MAX_ASYNC_CMDS, sizeof(*async_cmd), GFP_KERNEL);
+ if (!async_cmd)
+ return;
+
list_for_each_entry(uobject, &ufile->uobjects, list) {
WARN_ON(uverbs_try_lock_object(uobject, UVERBS_LOOKUP_WRITE));
@@ -2713,6 +2717,8 @@ void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile)
devx_wait_async_destroy(&async_cmd[head % MAX_ASYNC_CMDS]);
head++;
}
+
+ kfree(async_cmd);
}
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ce7610740412..df6557ddbdfc 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1791,6 +1791,33 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
context->devx_uid);
}
+static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave)
+{
+ int err;
+
+ err = mlx5_nic_vport_update_local_lb(master, true);
+ if (err)
+ return err;
+
+ err = mlx5_nic_vport_update_local_lb(slave, true);
+ if (err)
+ goto out;
+
+ return 0;
+
+out:
+ mlx5_nic_vport_update_local_lb(master, false);
+ return err;
+}
+
+static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave)
+{
+ mlx5_nic_vport_update_local_lb(slave, false);
+ mlx5_nic_vport_update_local_lb(master, false);
+}
+
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
int err = 0;
@@ -3495,6 +3522,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
lockdep_assert_held(&mlx5_ib_multiport_mutex);
+ mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
+
mlx5_core_mp_event_replay(ibdev->mdev,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
NULL);
@@ -3590,6 +3619,10 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
&key);
+ err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
+ if (err)
+ goto unbind;
+
return true;
unbind:
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 57f9bc2a4a3a..bd35e75d9ce5 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -2027,23 +2027,50 @@ void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+static int mlx5_umr_revoke_mr_with_lock(struct mlx5_ib_mr *mr)
{
- struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
- struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
- bool is_odp = is_odp_mr(mr);
bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
- !to_ib_umem_dmabuf(mr->umem)->pinned;
- bool from_cache = !!ent;
- int ret = 0;
+ !to_ib_umem_dmabuf(mr->umem)->pinned;
+ bool is_odp = is_odp_mr(mr);
+ int ret;
if (is_odp)
mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
if (is_odp_dma_buf)
- dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL);
+ dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv,
+ NULL);
+
+ ret = mlx5r_umr_revoke_mr(mr);
+
+ if (is_odp) {
+ if (!ret)
+ to_ib_umem_odp(mr->umem)->private = NULL;
+ mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ }
+
+ if (is_odp_dma_buf) {
+ if (!ret)
+ to_ib_umem_dmabuf(mr->umem)->private = NULL;
+ dma_resv_unlock(
+ to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ }
- if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
+ return ret;
+}
+
+static int mlx5r_handle_mkey_cleanup(struct mlx5_ib_mr *mr)
+{
+ bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
+ !to_ib_umem_dmabuf(mr->umem)->pinned;
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+ bool is_odp = is_odp_mr(mr);
+ bool from_cache = !!ent;
+ int ret;
+
+ if (mr->mmkey.cacheable && !mlx5_umr_revoke_mr_with_lock(mr) &&
+ !cache_ent_find_and_store(dev, mr)) {
ent = mr->mmkey.cache_ent;
/* upon storing to a clean temp entry - schedule its cleanup */
spin_lock_irq(&ent->mkeys_queue.lock);
@@ -2055,7 +2082,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
- goto out;
+ return 0;
}
if (ent) {
@@ -2064,8 +2091,14 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
mr->mmkey.cache_ent = NULL;
spin_unlock_irq(&ent->mkeys_queue.lock);
}
+
+ if (is_odp)
+ mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+
+ if (is_odp_dma_buf)
+ dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv,
+ NULL);
ret = destroy_mkey(dev, mr);
-out:
if (is_odp) {
if (!ret)
to_ib_umem_odp(mr->umem)->private = NULL;
@@ -2075,9 +2108,9 @@ out:
if (is_odp_dma_buf) {
if (!ret)
to_ib_umem_dmabuf(mr->umem)->private = NULL;
- dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ dma_resv_unlock(
+ to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
}
-
return ret;
}
@@ -2126,7 +2159,7 @@ static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
}
/* Stop DMA */
- rc = mlx5_revoke_mr(mr);
+ rc = mlx5r_handle_mkey_cleanup(mr);
if (rc)
return rc;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index eaa2f9f5f3a9..f6abd64f07f7 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -259,8 +259,8 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
}
if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
- __xa_erase(&mr_to_mdev(mr)->odp_mkeys,
- mlx5_base_mkey(mr->mmkey.key));
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key));
xa_unlock(&imr->implicit_children);
/* Freeing a MR is a sleeping operation, so bounce to a work queue */
@@ -532,8 +532,8 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
}
if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
- ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_KERNEL);
+ ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_KERNEL);
if (xa_is_err(ret)) {
ret = ERR_PTR(xa_err(ret));
__xa_erase(&imr->implicit_children, idx);
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 61897d50162d..e58fe9d8b9e7 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -559,11 +559,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- struct tegra_pd *pd = as->pd;
+ u32 *pd = &as->pd->val[pd_index];
unsigned long offset = pd_index * sizeof(*pd);
/* Set the page directory entry first */
- pd->val[pd_index] = value;
+ *pd = value;
/* Then flush the page directory entry from caches */
dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
index 268cc18b781f..258b8e9a2d57 100644
--- a/drivers/irqchip/irq-ath79-misc.c
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -15,6 +15,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <asm/time.h>
+
#define AR71XX_RESET_REG_MISC_INT_STATUS 0
#define AR71XX_RESET_REG_MISC_INT_ENABLE 4
@@ -177,21 +179,3 @@ static int __init ar7240_misc_intc_of_init(
IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
ar7240_misc_intc_of_init);
-
-void __init ath79_misc_irq_init(void __iomem *regs, int irq,
- int irq_base, bool is_ar71xx)
-{
- struct irq_domain *domain;
-
- if (is_ar71xx)
- ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
- else
- ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
-
- domain = irq_domain_create_legacy(NULL, ATH79_MISC_IRQ_COUNT,
- irq_base, 0, &misc_irq_domain_ops, regs);
- if (!domain)
- panic("Failed to create MISC irqdomain");
-
- ath79_misc_intc_domain_init(domain, irq);
-}
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index d4697e79d5a3..b2d10063d35f 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -5,7 +5,6 @@ config BCACHE
select BLOCK_HOLDER_DEPRECATED if SYSFS
select CRC64
select CLOSURES
- select MIN_HEAP
help
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 8998e61efa40..48ce750bf70a 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -164,61 +164,40 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
* prio is worth 1/8th of what INITIAL_PRIO is worth.
*/
-static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b)
-{
- unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;
-
- return (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);
-}
-
-static inline bool new_bucket_max_cmp(const void *l, const void *r, void *args)
-{
- struct bucket **lhs = (struct bucket **)l;
- struct bucket **rhs = (struct bucket **)r;
- struct cache *ca = args;
-
- return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs);
-}
-
-static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args)
-{
- struct bucket **lhs = (struct bucket **)l;
- struct bucket **rhs = (struct bucket **)r;
- struct cache *ca = args;
+#define bucket_prio(b) \
+({ \
+ unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
+ \
+ (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
+})
- return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
-}
+#define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r))
+#define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r))
static void invalidate_buckets_lru(struct cache *ca)
{
struct bucket *b;
- const struct min_heap_callbacks bucket_max_cmp_callback = {
- .less = new_bucket_max_cmp,
- .swp = NULL,
- };
- const struct min_heap_callbacks bucket_min_cmp_callback = {
- .less = new_bucket_min_cmp,
- .swp = NULL,
- };
+ ssize_t i;
- ca->heap.nr = 0;
+ ca->heap.used = 0;
for_each_bucket(b, ca) {
if (!bch_can_invalidate_bucket(ca, b))
continue;
- if (!min_heap_full(&ca->heap))
- min_heap_push(&ca->heap, &b, &bucket_max_cmp_callback, ca);
- else if (!new_bucket_max_cmp(&b, min_heap_peek(&ca->heap), ca)) {
+ if (!heap_full(&ca->heap))
+ heap_add(&ca->heap, b, bucket_max_cmp);
+ else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
ca->heap.data[0] = b;
- min_heap_sift_down(&ca->heap, 0, &bucket_max_cmp_callback, ca);
+ heap_sift(&ca->heap, 0, bucket_max_cmp);
}
}
- min_heapify_all(&ca->heap, &bucket_min_cmp_callback, ca);
+ for (i = ca->heap.used / 2 - 1; i >= 0; --i)
+ heap_sift(&ca->heap, i, bucket_min_cmp);
while (!fifo_full(&ca->free_inc)) {
- if (!ca->heap.nr) {
+ if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
/*
* We don't want to be calling invalidate_buckets()
* multiple times when it can't do anything
@@ -227,8 +206,6 @@ static void invalidate_buckets_lru(struct cache *ca)
wake_up_gc(ca->set);
return;
}
- b = min_heap_peek(&ca->heap)[0];
- min_heap_pop(&ca->heap, &bucket_min_cmp_callback, ca);
bch_invalidate_one_bucket(ca, b);
}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 785b0d9008fa..1d33e40d26ea 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -458,7 +458,7 @@ struct cache {
/* Allocation stuff: */
struct bucket *buckets;
- DEFINE_MIN_HEAP(struct bucket *, cache_heap) heap;
+ DECLARE_HEAP(struct bucket *, heap);
/*
* If nonzero, we know we aren't going to find any buckets to invalidate
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 68258a16e125..463eb13bd0b2 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -54,11 +54,9 @@ void bch_dump_bucket(struct btree_keys *b)
int __bch_count_data(struct btree_keys *b)
{
unsigned int ret = 0;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
-
if (b->ops->is_extents)
for_each_key(b, k, &iter)
ret += KEY_SIZE(k);
@@ -69,11 +67,9 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
va_list args;
struct bkey *k, *p = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
const char *err;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
-
for_each_key(b, k, &iter) {
if (b->ops->is_extents) {
err = "Keys out of order";
@@ -114,9 +110,9 @@ bug:
static void bch_btree_iter_next_check(struct btree_iter *iter)
{
- struct bkey *k = iter->heap.data->k, *next = bkey_next(k);
+ struct bkey *k = iter->data->k, *next = bkey_next(k);
- if (next < iter->heap.data->end &&
+ if (next < iter->data->end &&
bkey_cmp(k, iter->b->ops->is_extents ?
&START_KEY(next) : next) > 0) {
bch_dump_bucket(iter->b);
@@ -883,14 +879,12 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey preceding_key_on_stack = ZERO_KEY;
struct bkey *preceding_key_p = &preceding_key_on_stack;
BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
-
/*
* If k has preceding key, preceding_key_p will be set to address
* of k's preceding key; otherwise preceding_key_p will be set
@@ -901,9 +895,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
else
preceding_key(k, &preceding_key_p);
- m = bch_btree_iter_init(b, &iter, preceding_key_p);
+ m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
- if (b->ops->insert_fixup(b, k, &iter, replace_key))
+ if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
return status;
status = BTREE_INSERT_STATUS_INSERT;
@@ -1083,94 +1077,79 @@ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
/* Btree iterator */
-typedef bool (new_btree_iter_cmp_fn)(const void *, const void *, void *);
+typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
+ struct btree_iter_set);
-static inline bool new_btree_iter_cmp(const void *l, const void *r, void __always_unused *args)
+static inline bool btree_iter_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
{
- const struct btree_iter_set *_l = l;
- const struct btree_iter_set *_r = r;
-
- return bkey_cmp(_l->k, _r->k) <= 0;
+ return bkey_cmp(l.k, r.k) > 0;
}
static inline bool btree_iter_end(struct btree_iter *iter)
{
- return !iter->heap.nr;
+ return !iter->used;
}
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end)
{
- const struct min_heap_callbacks callbacks = {
- .less = new_btree_iter_cmp,
- .swp = NULL,
- };
-
if (k != end)
- BUG_ON(!min_heap_push(&iter->heap,
- &((struct btree_iter_set) { k, end }),
- &callbacks,
- NULL));
+ BUG_ON(!heap_add(iter,
+ ((struct btree_iter_set) { k, end }),
+ btree_iter_cmp));
}
-static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
- struct bkey *search,
- struct bset_tree *start)
+static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
+ struct bkey *search,
+ struct bset_tree *start)
{
struct bkey *ret = NULL;
- iter->heap.size = ARRAY_SIZE(iter->heap.preallocated);
- iter->heap.nr = 0;
+ iter->iter.size = ARRAY_SIZE(iter->stack_data);
+ iter->iter.used = 0;
#ifdef CONFIG_BCACHE_DEBUG
- iter->b = b;
+ iter->iter.b = b;
#endif
for (; start <= bset_tree_last(b); start++) {
ret = bch_bset_search(b, start, search);
- bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
+ bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
}
return ret;
}
-struct bkey *bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
+struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
struct bkey *search)
{
- return __bch_btree_iter_init(b, iter, search, b->set);
+ return __bch_btree_iter_stack_init(b, iter, search, b->set);
}
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
- new_btree_iter_cmp_fn *cmp)
+ btree_iter_cmp_fn *cmp)
{
struct btree_iter_set b __maybe_unused;
struct bkey *ret = NULL;
- const struct min_heap_callbacks callbacks = {
- .less = cmp,
- .swp = NULL,
- };
if (!btree_iter_end(iter)) {
bch_btree_iter_next_check(iter);
- ret = iter->heap.data->k;
- iter->heap.data->k = bkey_next(iter->heap.data->k);
+ ret = iter->data->k;
+ iter->data->k = bkey_next(iter->data->k);
- if (iter->heap.data->k > iter->heap.data->end) {
+ if (iter->data->k > iter->data->end) {
WARN_ONCE(1, "bset was corrupt!\n");
- iter->heap.data->k = iter->heap.data->end;
+ iter->data->k = iter->data->end;
}
- if (iter->heap.data->k == iter->heap.data->end) {
- if (iter->heap.nr) {
- b = min_heap_peek(&iter->heap)[0];
- min_heap_pop(&iter->heap, &callbacks, NULL);
- }
- }
+ if (iter->data->k == iter->data->end)
+ heap_pop(iter, b, cmp);
else
- min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
+ heap_sift(iter, 0, cmp);
}
return ret;
@@ -1178,7 +1157,7 @@ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
- return __bch_btree_iter_next(iter, new_btree_iter_cmp);
+ return __bch_btree_iter_next(iter, btree_iter_cmp);
}
@@ -1216,18 +1195,16 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
struct btree_iter *iter,
bool fixup, bool remove_stale)
{
+ int i;
struct bkey *k, *last = NULL;
BKEY_PADDED(k) tmp;
bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
? bch_ptr_bad
: bch_ptr_invalid;
- const struct min_heap_callbacks callbacks = {
- .less = b->ops->sort_cmp,
- .swp = NULL,
- };
/* Heapify the iterator, using our comparison function */
- min_heapify_all(&iter->heap, &callbacks, NULL);
+ for (i = iter->used / 2 - 1; i >= 0; --i)
+ heap_sift(iter, i, b->ops->sort_cmp);
while (!btree_iter_end(iter)) {
if (b->ops->sort_fixup && fixup)
@@ -1316,11 +1293,10 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state)
{
size_t order = b->page_order, keys = 0;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
int oldsize = bch_count_data(b);
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
- __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
+ __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
if (start) {
unsigned int i;
@@ -1331,7 +1307,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
order = get_order(__set_bytes(b->set->data, keys));
}
- __btree_sort(b, &iter, start, order, false, state);
+ __btree_sort(b, &iter.iter, start, order, false, state);
EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
@@ -1347,13 +1323,11 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
- struct btree_iter iter;
-
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
+ struct btree_iter_stack iter;
- bch_btree_iter_init(b, &iter, NULL);
+ bch_btree_iter_stack_init(b, &iter, NULL);
- btree_mergesort(b, new->set->data, &iter, false, true);
+ btree_mergesort(b, new->set->data, &iter.iter, false, true);
bch_time_stats_update(&state->time, start_time);
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index f79441acd4c1..011f6062c4c0 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -187,9 +187,8 @@ struct bset_tree {
};
struct btree_keys_ops {
- bool (*sort_cmp)(const void *l,
- const void *r,
- void *args);
+ bool (*sort_cmp)(struct btree_iter_set l,
+ struct btree_iter_set r);
struct bkey *(*sort_fixup)(struct btree_iter *iter,
struct bkey *tmp);
bool (*insert_fixup)(struct btree_keys *b,
@@ -313,17 +312,23 @@ enum {
BTREE_INSERT_STATUS_FRONT_MERGE,
};
-struct btree_iter_set {
- struct bkey *k, *end;
-};
-
/* Btree key iteration */
struct btree_iter {
+ size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
struct btree_keys *b;
#endif
- MIN_HEAP_PREALLOCATED(struct btree_iter_set, btree_iter_heap, MAX_BSETS) heap;
+ struct btree_iter_set {
+ struct bkey *k, *end;
+ } data[];
+};
+
+/* Fixed-size btree_iter that can be allocated on the stack */
+
+struct btree_iter_stack {
+ struct btree_iter iter;
+ struct btree_iter_set stack_data[MAX_BSETS];
};
typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
@@ -335,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end);
-struct bkey *bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
- struct bkey *search);
+struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
+ struct bkey *search);
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search);
@@ -352,13 +357,14 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
return search ? __bch_bset_search(b, t, search) : t->data->start;
}
-#define for_each_key_filter(b, k, iter, filter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+#define for_each_key_filter(b, k, stack_iter, filter) \
+ for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
+ ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
+ filter));)
-#define for_each_key(b, k, iter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next(iter));)
+#define for_each_key(b, k, stack_iter) \
+ for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
+ ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
/* Sorting */
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 1d0100677357..210b59007d98 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -148,19 +148,19 @@ void bch_btree_node_read_done(struct btree *b)
{
const char *err = "bad btree header";
struct bset *i = btree_bset_first(b);
- struct btree_iter iter;
+ struct btree_iter *iter;
/*
* c->fill_iter can allocate an iterator with more memory space
* than static MAX_BSETS.
* See the comment around cache_set->fill_iter.
*/
- iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
- iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
- iter.heap.nr = 0;
+ iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
+ iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
+ iter->used = 0;
#ifdef CONFIG_BCACHE_DEBUG
- iter.b = &b->keys;
+ iter->b = &b->keys;
#endif
if (!i->seq)
@@ -198,7 +198,7 @@ void bch_btree_node_read_done(struct btree *b)
if (i != b->keys.set[0].data && !i->keys)
goto err;
- bch_btree_iter_push(&iter, i->start, bset_bkey_last(i));
+ bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
b->written += set_blocks(i, block_bytes(b->c->cache));
}
@@ -210,7 +210,7 @@ void bch_btree_node_read_done(struct btree *b)
if (i->seq == b->keys.set[0].data->seq)
goto err;
- bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort);
+ bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
i = b->keys.set[0].data;
err = "short btree key";
@@ -222,7 +222,7 @@ void bch_btree_node_read_done(struct btree *b)
bch_bset_init_next(&b->keys, write_block(b),
bset_magic(&b->c->cache->sb));
out:
- mempool_free(iter.heap.data, &b->c->fill_iter);
+ mempool_free(iter, &b->c->fill_iter);
return;
err:
set_btree_node_io_error(b);
@@ -1306,11 +1306,9 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
uint8_t stale = 0;
unsigned int keys = 0, good_keys = 0;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bset_tree *t;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
-
gc->nodes++;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
@@ -1569,11 +1567,9 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
static unsigned int btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
unsigned int ret = 0;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
-
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
ret += bkey_u64s(k);
@@ -1612,18 +1608,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
int ret = 0;
bool should_rewrite;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct gc_merge_info r[GC_MERGE_NODES];
struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
- bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
+ bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
for (i = r; i < r + ARRAY_SIZE(r); i++)
i->b = ERR_PTR(-EINTR);
while (1) {
- k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
+ k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ bch_ptr_bad);
if (k) {
r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
true, b);
@@ -1918,9 +1914,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
int ret = 0;
struct bkey *k, *p = NULL;
- struct btree_iter iter;
-
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
+ struct btree_iter_stack iter;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
bch_initial_mark_key(b->c, b->level, k);
@@ -1928,10 +1922,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
bch_initial_mark_key(b->c, b->level + 1, &b->key);
if (b->level) {
- bch_btree_iter_init(&b->keys, &iter, NULL);
+ bch_btree_iter_stack_init(&b->keys, &iter, NULL);
do {
- k = bch_btree_iter_next_filter(&iter, &b->keys,
+ k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
bch_ptr_bad);
if (k) {
btree_node_prefetch(b, k);
@@ -1959,7 +1953,7 @@ static int bch_btree_check_thread(void *arg)
struct btree_check_info *info = arg;
struct btree_check_state *check_state = info->state;
struct cache_set *c = check_state->c;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
@@ -1967,11 +1961,9 @@ static int bch_btree_check_thread(void *arg)
cur_idx = prev_idx = 0;
ret = 0;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
-
/* root node keys are checked before thread created */
- bch_btree_iter_init(&c->root->keys, &iter, NULL);
- k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);
p = k;
@@ -1989,7 +1981,7 @@ static int bch_btree_check_thread(void *arg)
skip_nr = cur_idx - prev_idx;
while (skip_nr) {
- k = bch_btree_iter_next_filter(&iter,
+ k = bch_btree_iter_next_filter(&iter.iter,
&c->root->keys,
bch_ptr_bad);
if (k)
@@ -2062,11 +2054,9 @@ int bch_btree_check(struct cache_set *c)
int ret = 0;
int i;
struct bkey *k = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct btree_check_state check_state;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
-
/* check and mark root node keys */
for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
bch_initial_mark_key(c, c->root->level, k);
@@ -2560,12 +2550,11 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
if (b->level) {
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
- bch_btree_iter_init(&b->keys, &iter, from);
+ bch_btree_iter_stack_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
+ while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
bch_ptr_bad))) {
ret = bcache_btree(map_nodes_recurse, k, b,
op, from, fn, flags);
@@ -2594,12 +2583,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
{
int ret = MAP_CONTINUE;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
- bch_btree_iter_init(&b->keys, &iter, from);
+ bch_btree_iter_stack_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
+ while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ bch_ptr_bad))) {
ret = !b->level
? fn(op, b, k)
: bcache_btree(map_keys_recurse, k,
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 4b84fda1530a..d626ffcbecb9 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -33,16 +33,15 @@ static void sort_key_next(struct btree_iter *iter,
i->k = bkey_next(i->k);
if (i->k == i->end)
- *i = iter->heap.data[--iter->heap.nr];
+ *i = iter->data[--iter->used];
}
-static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args)
+static bool bch_key_sort_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
{
- struct btree_iter_set *_l = (struct btree_iter_set *)l;
- struct btree_iter_set *_r = (struct btree_iter_set *)r;
- int64_t c = bkey_cmp(_l->k, _r->k);
+ int64_t c = bkey_cmp(l.k, r.k);
- return !(c ? c > 0 : _l->k < _r->k);
+ return c ? c > 0 : l.k < r.k;
}
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
@@ -239,7 +238,7 @@ static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
}
const struct btree_keys_ops bch_btree_keys_ops = {
- .sort_cmp = new_bch_key_sort_cmp,
+ .sort_cmp = bch_key_sort_cmp,
.insert_fixup = bch_btree_ptr_insert_fixup,
.key_invalid = bch_btree_ptr_invalid,
.key_bad = bch_btree_ptr_bad,
@@ -256,28 +255,22 @@ const struct btree_keys_ops bch_btree_keys_ops = {
* Necessary for btree_sort_fixup() - if there are multiple keys that compare
* equal in different sets, we have to process them newest to oldest.
*/
-
-static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_unused *args)
+static bool bch_extent_sort_cmp(struct btree_iter_set l,
+ struct btree_iter_set r)
{
- struct btree_iter_set *_l = (struct btree_iter_set *)l;
- struct btree_iter_set *_r = (struct btree_iter_set *)r;
- int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k));
+ int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
- return !(c ? c > 0 : _l->k < _r->k);
+ return c ? c > 0 : l.k < r.k;
}
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
struct bkey *tmp)
{
- const struct min_heap_callbacks callbacks = {
- .less = new_bch_extent_sort_cmp,
- .swp = NULL,
- };
- while (iter->heap.nr > 1) {
- struct btree_iter_set *top = iter->heap.data, *i = top + 1;
-
- if (iter->heap.nr > 2 &&
- !new_bch_extent_sort_cmp(&i[0], &i[1], NULL))
+ while (iter->used > 1) {
+ struct btree_iter_set *top = iter->data, *i = top + 1;
+
+ if (iter->used > 2 &&
+ bch_extent_sort_cmp(i[0], i[1]))
i++;
if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
@@ -285,7 +278,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
if (!KEY_SIZE(i->k)) {
sort_key_next(iter, i);
- min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
+ heap_sift(iter, i - top, bch_extent_sort_cmp);
continue;
}
@@ -295,7 +288,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
else
bch_cut_front(top->k, i->k);
- min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
+ heap_sift(iter, i - top, bch_extent_sort_cmp);
} else {
/* can't happen because of comparison func */
BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
@@ -305,7 +298,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
bch_cut_back(&START_KEY(i->k), tmp);
bch_cut_front(i->k, top->k);
- min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
+ heap_sift(iter, 0, bch_extent_sort_cmp);
return tmp;
} else {
@@ -625,7 +618,7 @@ static bool bch_extent_merge(struct btree_keys *bk,
}
const struct btree_keys_ops bch_extent_keys_ops = {
- .sort_cmp = new_bch_extent_sort_cmp,
+ .sort_cmp = bch_extent_sort_cmp,
.sort_fixup = bch_extent_sort_fixup,
.insert_fixup = bch_extent_insert_fixup,
.key_invalid = bch_extent_invalid,
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 45ca134cbf02..26a6a535ec32 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -182,19 +182,16 @@ err: if (!IS_ERR_OR_NULL(w->private))
closure_sync(&cl);
}
-static bool new_bucket_cmp(const void *l, const void *r, void __always_unused *args)
+static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
- struct bucket **_l = (struct bucket **)l;
- struct bucket **_r = (struct bucket **)r;
-
- return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r);
+ return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}
static unsigned int bucket_heap_top(struct cache *ca)
{
struct bucket *b;
- return (b = min_heap_peek(&ca->heap)[0]) ? GC_SECTORS_USED(b) : 0;
+ return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
void bch_moving_gc(struct cache_set *c)
@@ -202,10 +199,6 @@ void bch_moving_gc(struct cache_set *c)
struct cache *ca = c->cache;
struct bucket *b;
unsigned long sectors_to_move, reserve_sectors;
- const struct min_heap_callbacks callbacks = {
- .less = new_bucket_cmp,
- .swp = NULL,
- };
if (!c->copy_gc_enabled)
return;
@@ -216,7 +209,7 @@ void bch_moving_gc(struct cache_set *c)
reserve_sectors = ca->sb.bucket_size *
fifo_used(&ca->free[RESERVE_MOVINGGC]);
- ca->heap.nr = 0;
+ ca->heap.used = 0;
for_each_bucket(b, ca) {
if (GC_MARK(b) == GC_MARK_METADATA ||
@@ -225,31 +218,25 @@ void bch_moving_gc(struct cache_set *c)
atomic_read(&b->pin))
continue;
- if (!min_heap_full(&ca->heap)) {
+ if (!heap_full(&ca->heap)) {
sectors_to_move += GC_SECTORS_USED(b);
- min_heap_push(&ca->heap, &b, &callbacks, NULL);
- } else if (!new_bucket_cmp(&b, min_heap_peek(&ca->heap), ca)) {
+ heap_add(&ca->heap, b, bucket_cmp);
+ } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
sectors_to_move -= bucket_heap_top(ca);
sectors_to_move += GC_SECTORS_USED(b);
ca->heap.data[0] = b;
- min_heap_sift_down(&ca->heap, 0, &callbacks, NULL);
+ heap_sift(&ca->heap, 0, bucket_cmp);
}
}
while (sectors_to_move > reserve_sectors) {
- if (ca->heap.nr) {
- b = min_heap_peek(&ca->heap)[0];
- min_heap_pop(&ca->heap, &callbacks, NULL);
- }
+ heap_pop(&ca->heap, b, bucket_cmp);
sectors_to_move -= GC_SECTORS_USED(b);
}
- while (ca->heap.nr) {
- b = min_heap_peek(&ca->heap)[0];
- min_heap_pop(&ca->heap, &callbacks, NULL);
+ while (heap_pop(&ca->heap, b, bucket_cmp))
SET_GC_MOVE(b, 1);
- }
mutex_unlock(&c->bucket_lock);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1efb768b2890..2ea490b9d370 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1912,7 +1912,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
INIT_LIST_HEAD(&c->btree_cache_freed);
INIT_LIST_HEAD(&c->data_buckets);
- iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
+ iter_size = sizeof(struct btree_iter) +
+ ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
sizeof(struct btree_iter_set);
c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index e8f696cb58c0..826b14cae4e5 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -660,9 +660,7 @@ static unsigned int bch_root_usage(struct cache_set *c)
unsigned int bytes = 0;
struct bkey *k;
struct btree *b;
- struct btree_iter iter;
-
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
+ struct btree_iter_stack iter;
goto lock_root;
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 539454d8e2d0..f61ab1bada6c 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
-#include <linux/min_heap.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
@@ -31,10 +30,16 @@ struct closure;
#endif
+#define DECLARE_HEAP(type, name) \
+ struct { \
+ size_t size, used; \
+ type *data; \
+ } name
+
#define init_heap(heap, _size, gfp) \
({ \
size_t _bytes; \
- (heap)->nr = 0; \
+ (heap)->used = 0; \
(heap)->size = (_size); \
_bytes = (heap)->size * sizeof(*(heap)->data); \
(heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
@@ -47,6 +52,64 @@ do { \
(heap)->data = NULL; \
} while (0)
+#define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j])
+
+#define heap_sift(h, i, cmp) \
+do { \
+ size_t _r, _j = i; \
+ \
+ for (; _j * 2 + 1 < (h)->used; _j = _r) { \
+ _r = _j * 2 + 1; \
+ if (_r + 1 < (h)->used && \
+ cmp((h)->data[_r], (h)->data[_r + 1])) \
+ _r++; \
+ \
+ if (cmp((h)->data[_r], (h)->data[_j])) \
+ break; \
+ heap_swap(h, _r, _j); \
+ } \
+} while (0)
+
+#define heap_sift_down(h, i, cmp) \
+do { \
+ while (i) { \
+ size_t p = (i - 1) / 2; \
+ if (cmp((h)->data[i], (h)->data[p])) \
+ break; \
+ heap_swap(h, i, p); \
+ i = p; \
+ } \
+} while (0)
+
+#define heap_add(h, d, cmp) \
+({ \
+ bool _r = !heap_full(h); \
+ if (_r) { \
+ size_t _i = (h)->used++; \
+ (h)->data[_i] = d; \
+ \
+ heap_sift_down(h, _i, cmp); \
+ heap_sift(h, _i, cmp); \
+ } \
+ _r; \
+})
+
+#define heap_pop(h, d, cmp) \
+({ \
+ bool _r = (h)->used; \
+ if (_r) { \
+ (d) = (h)->data[0]; \
+ (h)->used--; \
+ heap_swap(h, 0, (h)->used); \
+ heap_sift(h, 0, cmp); \
+ } \
+ _r; \
+})
+
+#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
+
+#define heap_full(h) ((h)->used == (h)->size)
+
#define DECLARE_FIFO(type, name) \
struct { \
size_t front, back, size, mask; \
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 453efbbdc8ee..302e75f1fc4b 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -908,16 +908,15 @@ static int bch_dirty_init_thread(void *arg)
struct dirty_init_thrd_info *info = arg;
struct bch_dirty_init_state *state = info->state;
struct cache_set *c = state->c;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
k = p = NULL;
prev_idx = 0;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
- bch_btree_iter_init(&c->root->keys, &iter, NULL);
- k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);
p = k;
@@ -931,7 +930,7 @@ static int bch_dirty_init_thread(void *arg)
skip_nr = cur_idx - prev_idx;
while (skip_nr) {
- k = bch_btree_iter_next_filter(&iter,
+ k = bch_btree_iter_next_filter(&iter.iter,
&c->root->keys,
bch_ptr_bad);
if (k)
@@ -980,13 +979,11 @@ void bch_sectors_dirty_init(struct bcache_device *d)
int i;
struct btree *b = NULL;
struct bkey *k = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
struct bch_dirty_init_state state;
- min_heap_init(&iter.heap, NULL, MAX_BSETS);
-
retry_lock:
b = c->root;
rw_lock(0, b, b->level);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9dfdb63220d7..17157c4216a5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -517,7 +517,10 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
{
struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
- struct md5_state md5state;
+ union {
+ struct md5_state md5state;
+ u8 state[CRYPTO_MD5_STATESIZE];
+ } u;
__le32 buf[4];
int i, r;
@@ -548,13 +551,13 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
return r;
/* No MD5 padding here */
- r = crypto_shash_export(desc, &md5state);
+ r = crypto_shash_export(desc, &u.md5state);
if (r)
return r;
for (i = 0; i < MD5_HASH_WORDS; i++)
- __cpu_to_le32s(&md5state.hash[i]);
- memcpy(iv, &md5state.hash, cc->iv_size);
+ __cpu_to_le32s(&u.md5state.hash[i]);
+ memcpy(iv, &u.md5state.hash, cc->iv_size);
return 0;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index d296770478b2..e8c0a8c6fb51 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2407,7 +2407,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
*/
sb_retrieve_failed_devices(sb, failed_devices);
rdev_for_each(r, mddev) {
- if (test_bit(Journal, &rdev->flags) ||
+ if (test_bit(Journal, &r->flags) ||
!r->sb_page)
continue;
sb2 = page_address(r->sb_page);
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 488e346047c1..77230fbe07be 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -573,7 +573,6 @@ static int device_irq_init(struct pm860x_chip *chip,
unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
int data, mask, ret = -EINVAL;
int nr_irqs, irq_base = -1;
- struct device_node *node = i2c->dev.of_node;
mask = PM8607_B0_MISC1_INV_INT | PM8607_B0_MISC1_INT_CLEAR
| PM8607_B0_MISC1_INT_MASK;
@@ -624,7 +623,7 @@ static int device_irq_init(struct pm860x_chip *chip,
ret = -EBUSY;
goto out;
}
- irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, chip->irq_base, 0,
+ irq_domain_create_legacy(dev_fwnode(&i2c->dev), nr_irqs, chip->irq_base, 0,
&pm860x_irq_domain_ops, chip);
chip->core_irq = i2c->irq;
if (!chip->core_irq)
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 78b16c67a5fc..25377dcce60e 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -656,7 +656,6 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
{
unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
int ret;
- struct device_node *node = chip->dev->of_node;
/* clear all interrupts */
max8925_reg_read(chip->i2c, MAX8925_CHG_IRQ1);
@@ -682,8 +681,9 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
return -EBUSY;
}
- irq_domain_create_legacy(of_fwnode_handle(node), MAX8925_NR_IRQS, chip->irq_base, 0,
- &max8925_irq_domain_ops, chip);
+ irq_domain_create_legacy(dev_fwnode(chip->dev), MAX8925_NR_IRQS,
+ chip->irq_base, 0, &max8925_irq_domain_ops,
+ chip);
/* request irq handler for pmic main irq*/
chip->core_irq = irq;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 232c2bfe8c18..d3ab40651307 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -676,7 +676,6 @@ int twl4030_init_irq(struct device *dev, int irq_num)
static struct irq_chip twl4030_irq_chip;
int status, i;
int irq_base, irq_end, nr_irqs;
- struct device_node *node = dev->of_node;
/*
* TWL core and pwr interrupts must be contiguous because
@@ -691,7 +690,7 @@ int twl4030_init_irq(struct device *dev, int irq_num)
return irq_base;
}
- irq_domain_create_legacy(of_fwnode_handle(node), nr_irqs, irq_base, 0,
+ irq_domain_create_legacy(dev_fwnode(dev), nr_irqs, irq_base, 0,
&irq_domain_simple_ops, NULL);
irq_end = irq_base + TWL4030_CORE_NR_IRQS;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 391d81ad960c..8dc4f5c493fc 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -559,7 +559,7 @@ static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
/* Sanitize user input */
p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
- return mtd_add_partition(mtd, p.devname, p.start, p.length, NULL);
+ return mtd_add_partition(mtd, p.devname, p.start, p.length);
case BLKPG_DEL_PARTITION:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 429d8c16baf0..5ba9a741f5ac 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -68,13 +68,7 @@ static struct class mtd_class = {
.pm = MTD_CLS_PM_OPS,
};
-static struct class mtd_master_class = {
- .name = "mtd_master",
- .pm = MTD_CLS_PM_OPS,
-};
-
static DEFINE_IDR(mtd_idr);
-static DEFINE_IDR(mtd_master_idr);
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
@@ -89,9 +83,8 @@ EXPORT_SYMBOL_GPL(__mtd_next_device);
static LIST_HEAD(mtd_notifiers);
-#define MTD_MASTER_DEVS 255
+
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
-static dev_t mtd_master_devt;
/* REVISIT once MTD uses the driver model better, whoever allocates
* the mtd_info will probably want to use the release() hook...
@@ -111,17 +104,6 @@ static void mtd_release(struct device *dev)
device_destroy(&mtd_class, index + 1);
}
-static void mtd_master_release(struct device *dev)
-{
- struct mtd_info *mtd = dev_get_drvdata(dev);
-
- idr_remove(&mtd_master_idr, mtd->index);
- of_node_put(mtd_get_of_node(mtd));
-
- if (mtd_is_partition(mtd))
- release_mtd_partition(mtd);
-}
-
static void mtd_device_release(struct kref *kref)
{
struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
@@ -385,11 +367,6 @@ static const struct device_type mtd_devtype = {
.release = mtd_release,
};
-static const struct device_type mtd_master_devtype = {
- .name = "mtd_master",
- .release = mtd_master_release,
-};
-
static bool mtd_expert_analysis_mode;
#ifdef CONFIG_DEBUG_FS
@@ -657,13 +634,13 @@ exit_parent:
/**
* add_mtd_device - register an MTD device
* @mtd: pointer to new MTD device info structure
- * @partitioned: create partitioned device
*
* Add a device to the list of MTD devices present in the system, and
* notify each currently active MTD 'user' of its arrival. Returns
* zero on success or non-zero on failure.
*/
-int add_mtd_device(struct mtd_info *mtd, bool partitioned)
+
+int add_mtd_device(struct mtd_info *mtd)
{
struct device_node *np = mtd_get_of_node(mtd);
struct mtd_info *master = mtd_get_master(mtd);
@@ -710,17 +687,10 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned)
ofidx = -1;
if (np)
ofidx = of_alias_get_id(np, "mtd");
- if (partitioned) {
- if (ofidx >= 0)
- i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
- else
- i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
- } else {
- if (ofidx >= 0)
- i = idr_alloc(&mtd_master_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
- else
- i = idr_alloc(&mtd_master_idr, mtd, 0, 0, GFP_KERNEL);
- }
+ if (ofidx >= 0)
+ i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
+ else
+ i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
if (i < 0) {
error = i;
goto fail_locked;
@@ -768,18 +738,10 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned)
/* Caller should have set dev.parent to match the
* physical device, if appropriate.
*/
- if (partitioned) {
- mtd->dev.type = &mtd_devtype;
- mtd->dev.class = &mtd_class;
- mtd->dev.devt = MTD_DEVT(i);
- dev_set_name(&mtd->dev, "mtd%d", i);
- error = dev_set_name(&mtd->dev, "mtd%d", i);
- } else {
- mtd->dev.type = &mtd_master_devtype;
- mtd->dev.class = &mtd_master_class;
- mtd->dev.devt = MKDEV(MAJOR(mtd_master_devt), i);
- error = dev_set_name(&mtd->dev, "mtd_master%d", i);
- }
+ mtd->dev.type = &mtd_devtype;
+ mtd->dev.class = &mtd_class;
+ mtd->dev.devt = MTD_DEVT(i);
+ error = dev_set_name(&mtd->dev, "mtd%d", i);
if (error)
goto fail_devname;
dev_set_drvdata(&mtd->dev, mtd);
@@ -787,7 +749,6 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned)
of_node_get(mtd_get_of_node(mtd));
error = device_register(&mtd->dev);
if (error) {
- pr_err("mtd: %s device_register fail %d\n", mtd->name, error);
put_device(&mtd->dev);
goto fail_added;
}
@@ -799,13 +760,10 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned)
mtd_debugfs_populate(mtd);
- if (partitioned) {
- device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
- "mtd%dro", i);
- }
+ device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
+ "mtd%dro", i);
- pr_debug("mtd: Giving out %spartitioned device %d to %s\n",
- partitioned ? "" : "un-", i, mtd->name);
+ pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
@@ -813,16 +771,13 @@ int add_mtd_device(struct mtd_info *mtd, bool partitioned)
mutex_unlock(&mtd_table_mutex);
- if (partitioned) {
- if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
- if (IS_BUILTIN(CONFIG_MTD)) {
- pr_info("mtd: setting mtd%d (%s) as root device\n",
- mtd->index, mtd->name);
- ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
- } else {
- pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
- mtd->index, mtd->name);
- }
+ if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
+ if (IS_BUILTIN(CONFIG_MTD)) {
+ pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
+ } else {
+ pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
+ mtd->index, mtd->name);
}
}
@@ -838,10 +793,7 @@ fail_nvmem_add:
fail_added:
of_node_put(mtd_get_of_node(mtd));
fail_devname:
- if (partitioned)
- idr_remove(&mtd_idr, i);
- else
- idr_remove(&mtd_master_idr, i);
+ idr_remove(&mtd_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
return error;
@@ -859,14 +811,12 @@ fail_locked:
int del_mtd_device(struct mtd_info *mtd)
{
- struct mtd_notifier *not;
- struct idr *idr;
int ret;
+ struct mtd_notifier *not;
mutex_lock(&mtd_table_mutex);
- idr = mtd->dev.class == &mtd_class ? &mtd_idr : &mtd_master_idr;
- if (idr_find(idr, mtd->index) != mtd) {
+ if (idr_find(&mtd_idr, mtd->index) != mtd) {
ret = -ENODEV;
goto out_error;
}
@@ -1106,7 +1056,6 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
- struct mtd_info *parent;
int ret, err;
mtd_set_dev_defaults(mtd);
@@ -1115,30 +1064,25 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
if (ret)
goto out;
- ret = add_mtd_device(mtd, false);
- if (ret)
- goto out;
-
if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
- ret = mtd_add_partition(mtd, mtd->name, 0, MTDPART_SIZ_FULL, &parent);
+ ret = add_mtd_device(mtd);
if (ret)
goto out;
-
- } else {
- parent = mtd;
}
/* Prefer parsed partitions over driver-provided fallback */
- ret = parse_mtd_partitions(parent, types, parser_data);
+ ret = parse_mtd_partitions(mtd, types, parser_data);
if (ret == -EPROBE_DEFER)
goto out;
if (ret > 0)
ret = 0;
else if (nr_parts)
- ret = add_mtd_partitions(parent, parts, nr_parts);
- else if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
- ret = mtd_add_partition(parent, mtd->name, 0, MTDPART_SIZ_FULL, NULL);
+ ret = add_mtd_partitions(mtd, parts, nr_parts);
+ else if (!device_is_registered(&mtd->dev))
+ ret = add_mtd_device(mtd);
+ else
+ ret = 0;
if (ret)
goto out;
@@ -1158,14 +1102,13 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
register_reboot_notifier(&mtd->reboot_notifier);
}
- return 0;
out:
- nvmem_unregister(mtd->otp_user_nvmem);
- nvmem_unregister(mtd->otp_factory_nvmem);
-
- del_mtd_partitions(mtd);
+ if (ret) {
+ nvmem_unregister(mtd->otp_user_nvmem);
+ nvmem_unregister(mtd->otp_factory_nvmem);
+ }
- if (device_is_registered(&mtd->dev)) {
+ if (ret && device_is_registered(&mtd->dev)) {
err = del_mtd_device(mtd);
if (err)
pr_err("Error when deleting MTD device (%d)\n", err);
@@ -1324,7 +1267,8 @@ int __get_mtd_device(struct mtd_info *mtd)
mtd = mtd->parent;
}
- kref_get(&master->refcnt);
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ kref_get(&master->refcnt);
return 0;
}
@@ -1418,7 +1362,8 @@ void __put_mtd_device(struct mtd_info *mtd)
mtd = parent;
}
- kref_put(&master->refcnt, mtd_device_release);
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ kref_put(&master->refcnt, mtd_device_release);
module_put(master->owner);
@@ -2585,16 +2530,6 @@ static int __init init_mtd(void)
if (ret)
goto err_reg;
- ret = class_register(&mtd_master_class);
- if (ret)
- goto err_reg2;
-
- ret = alloc_chrdev_region(&mtd_master_devt, 0, MTD_MASTER_DEVS, "mtd_master");
- if (ret < 0) {
- pr_err("unable to allocate char dev region\n");
- goto err_chrdev;
- }
-
mtd_bdi = mtd_bdi_init("mtd");
if (IS_ERR(mtd_bdi)) {
ret = PTR_ERR(mtd_bdi);
@@ -2619,10 +2554,6 @@ out_procfs:
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
err_bdi:
- unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS);
-err_chrdev:
- class_unregister(&mtd_master_class);
-err_reg2:
class_unregister(&mtd_class);
err_reg:
pr_err("Error registering mtd class or bdi: %d\n", ret);
@@ -2636,12 +2567,9 @@ static void __exit cleanup_mtd(void)
if (proc_mtd)
remove_proc_entry("mtd", NULL);
class_unregister(&mtd_class);
- class_unregister(&mtd_master_class);
- unregister_chrdev_region(mtd_master_devt, MTD_MASTER_DEVS);
bdi_unregister(mtd_bdi);
bdi_put(mtd_bdi);
idr_destroy(&mtd_idr);
- idr_destroy(&mtd_master_idr);
}
module_init(init_mtd);
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 2258d31c5aa6..b014861a06a6 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -8,7 +8,7 @@ extern struct mutex mtd_table_mutex;
extern struct backing_dev_info *mtd_bdi;
struct mtd_info *__mtd_next_device(int i);
-int __must_check add_mtd_device(struct mtd_info *mtd, bool partitioned);
+int __must_check add_mtd_device(struct mtd_info *mtd);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 5a3db36d734e..994e8c51e674 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -86,7 +86,8 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
* parent conditional on that option. Note, this is a way to
* distinguish between the parent and its partitions in sysfs.
*/
- child->dev.parent = &parent->dev;
+ child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
+ &parent->dev : parent->dev.parent;
child->dev.of_node = part->of_node;
child->parent = parent;
child->part.offset = part->offset;
@@ -242,7 +243,7 @@ static int mtd_add_partition_attrs(struct mtd_info *new)
}
int mtd_add_partition(struct mtd_info *parent, const char *name,
- long long offset, long long length, struct mtd_info **out)
+ long long offset, long long length)
{
struct mtd_info *master = mtd_get_master(parent);
u64 parent_size = mtd_is_partition(parent) ?
@@ -275,15 +276,12 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
list_add_tail(&child->part.node, &parent->partitions);
mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(child, true);
+ ret = add_mtd_device(child);
if (ret)
goto err_remove_part;
mtd_add_partition_attrs(child);
- if (out)
- *out = child;
-
return 0;
err_remove_part:
@@ -415,7 +413,7 @@ int add_mtd_partitions(struct mtd_info *parent,
list_add_tail(&child->part.node, &parent->partitions);
mutex_unlock(&master->master.partitions_lock);
- ret = add_mtd_device(child, true);
+ ret = add_mtd_device(child);
if (ret) {
mutex_lock(&master->master.partitions_lock);
list_del(&child->part.node);
@@ -592,6 +590,9 @@ static int mtd_part_of_parse(struct mtd_info *master,
int ret, err = 0;
dev = &master->dev;
+ /* Use parent device (controller) if the top level MTD is not registered */
+ if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master))
+ dev = master->dev.parent;
np = mtd_get_of_node(master);
if (mtd_is_partition(master))
@@ -710,7 +711,6 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
if (ret < 0 && !err)
err = ret;
}
-
return err;
}
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 7099db7a62be..c411fe9be3ef 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -1585,6 +1585,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ nanddev_ecc_engine_cleanup(nand);
nanddev_cleanup(nand);
spinand_manufacturer_cleanup(spinand);
kfree(spinand->databuf);
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 19f8dd4a6370..b7a28f001a38 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -25,7 +25,7 @@
static SPINAND_OP_VARIANTS(read_cache_octal_variants,
SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 86 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 162 * HZ_PER_MHZ),
SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ),
SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
@@ -42,11 +42,11 @@ static SPINAND_OP_VARIANTS(update_cache_octal_variants,
static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants,
SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 104 * HZ_PER_MHZ),
SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
- SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 104 * HZ_PER_MHZ),
SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
@@ -289,7 +289,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
SPINAND_INFO("W35N02JW", /* 1.8V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22),
- NAND_MEMORG(1, 4096, 128, 64, 512, 10, 2, 1, 1),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 2, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
&write_cache_octal_variants,
@@ -298,7 +298,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)),
SPINAND_INFO("W35N04JW", /* 1.8V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23),
- NAND_MEMORG(1, 4096, 128, 64, 512, 10, 4, 1, 1),
+ NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 4, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants,
&write_cache_octal_variants,
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index e5c162f8c589..8edaa339d590 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -411,10 +411,11 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv = cdev_to_priv(mcan_class);
priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
- if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto out_m_can_class_free_dev;
- } else {
+ if (IS_ERR(priv->power)) {
+ if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_m_can_class_free_dev;
+ }
priv->power = NULL;
}
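
The tcan4x5x fix above only treats -EPROBE_DEFER as fatal and maps every other devm_regulator_get_optional() error to "no supply". A hedged variant of the same pattern using dev_err_probe() to record the deferral reason is sketched below; the cleanup the driver performs before returning is elided here, and only "vsup" is taken from the hunk.

        /* Sketch only; not the driver's exact error path. */
        priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
        if (IS_ERR(priv->power)) {
                if (PTR_ERR(priv->power) == -EPROBE_DEFER)
                        return dev_err_probe(&spi->dev, -EPROBE_DEFER,
                                             "vsup supply not ready\n");
                priv->power = NULL;     /* the supply is genuinely optional */
        }
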
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 862bdccb7439..dc2f4adac9bc 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2034,9 +2034,6 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
b53_get_vlan_entry(dev, pvid, vl);
vl->members &= ~BIT(port);
- if (vl->members == BIT(cpu_port))
- vl->members &= ~BIT(cpu_port);
- vl->untag = vl->members;
b53_set_vlan_entry(dev, pvid, vl);
}
@@ -2115,8 +2112,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
}
b53_get_vlan_entry(dev, pvid, vl);
- vl->members |= BIT(port) | BIT(cpu_port);
- vl->untag |= BIT(port) | BIT(cpu_port);
+ vl->members |= BIT(port);
b53_set_vlan_entry(dev, pvid, vl);
}
}
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index a7ec609d64de..06dea3a13e77 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -1065,23 +1065,18 @@ static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
+ int size, index, num_desc = HW_DSCP_NUM;
struct airoha_eth *eth = qdma->eth;
int id = qdma - &eth->qdma[0];
+ u32 status, buf_size;
dma_addr_t dma_addr;
const char *name;
- int size, index;
- u32 status;
-
- size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
- if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
- return -ENOMEM;
-
- airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
if (!name)
return -ENOMEM;
+ buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
index = of_property_match_string(eth->dev->of_node,
"memory-region-names", name);
if (index >= 0) {
@@ -1099,8 +1094,12 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
rmem = of_reserved_mem_lookup(np);
of_node_put(np);
dma_addr = rmem->base;
+ /* Compute the number of hw descriptors according to the
+ * reserved memory size and the payload buffer size
+ */
+ num_desc = div_u64(rmem->size, buf_size);
} else {
- size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+ size = buf_size * num_desc;
if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
GFP_KERNEL))
return -ENOMEM;
@@ -1108,15 +1107,21 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+ size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+ /* QDMA0: 2KB. QDMA1: 1KB */
airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
- FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
HW_FWD_DESC_NUM_MASK,
- FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
return read_poll_timeout(airoha_qdma_rr, status,
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 9067d2fc7706..0e217acfc5ef 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -809,8 +809,10 @@ airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
int idle;
hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
- ib1 = READ_ONCE(hwe->ib1);
+ if (!hwe)
+ continue;
+ ib1 = READ_ONCE(hwe->ib1);
state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
if (state != AIROHA_FOE_STATE_BIND) {
iter->hash = 0xffff;
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index 04187eb40ec6..150c85995cc1 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -614,8 +614,9 @@
RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
-#define RX_DONE_INT_MASK (RX_DONE_HIGH_INT_MASK | RX_DONE_LOW_INT_MASK)
#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
+#define RX_DONE_INT_MASK \
+ ((RX_DONE_HIGH_INT_MASK << RX_DONE_HIGH_OFFSET) | RX_DONE_LOW_INT_MASK)
#define INT_RX2_MASK(_n) \
((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 869580b6f70d..ae89a981e052 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2989,6 +2989,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
{
struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
+ bool flush_xdp = false;
u32 cons;
int rx_pkts = 0;
u8 event = 0;
@@ -3042,6 +3043,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
+ if (event & BNXT_REDIRECT_EVENT)
+ flush_xdp = true;
if (likely(rc >= 0))
rx_pkts += rc;
/* Increment rx_pkts when rc is -ENOMEM to count towards
@@ -3066,7 +3069,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (event & BNXT_REDIRECT_EVENT) {
+ if (flush_xdp) {
xdp_do_flush();
event &= ~BNXT_REDIRECT_EVENT;
}
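
The __bnxt_poll_work() change above latches BNXT_REDIRECT_EVENT into a local flush_xdp flag so the flush cannot be lost when per-iteration event bits are cleared. A generic sketch of that NAPI-poll shape follows; every example_* name is hypothetical, only xdp_do_flush() is the real API.

        #define EXAMPLE_REDIRECT_EVENT  BIT(0)  /* hypothetical event bit */

        /* Sketch of the pattern, not bnxt code. */
        static int example_poll_work(struct example_ring *ring, int budget)
        {
                bool flush_xdp = false;
                int done = 0;

                while (done < budget && example_has_work(ring)) {
                        u8 event = 0;

                        done += example_rx_one(ring, &event);   /* may reuse event */
                        if (event & EXAMPLE_REDIRECT_EVENT)
                                flush_xdp = true;       /* remember across iterations */
                }

                if (flush_xdp)
                        xdp_do_flush(); /* commit queued xdp_do_redirect() frames once */

                return done;
        }
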
@@ -10780,6 +10783,72 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
bp->num_rss_ctx--;
}
+static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ int rxr_id)
+{
+ u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ int i, vnic_rx;
+
+ /* An ntuple VNIC always spans all the rx rings, so any ring id
+ * change must be applied here; a future filter may depend on it.

+ */
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ return true;
+
+ for (i = 0; i < tbl_size; i++) {
+ if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
+ else
+ vnic_rx = bp->rss_indir_tbl[i];
+
+ if (rxr_id == vnic_rx)
+ return true;
+ }
+
+ return false;
+}
+
+static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u16 mru, int rxr_id)
+{
+ int rc;
+
+ if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
+ return 0;
+
+ if (mru) {
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ }
+ vnic->mru = mru;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+ return 0;
+}
+
+static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+ int rc;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
{
bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
@@ -15927,6 +15996,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
struct bnxt_vnic_info *vnic;
struct bnxt_napi *bnapi;
int i, rc;
+ u16 mru;
rxr = &bp->rx_ring[idx];
clone = qmem;
@@ -15977,21 +16047,15 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
- vnic->vnic_id, rc);
+ rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
+ if (rc)
return rc;
- }
- vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
- bnxt_hwrm_vnic_update(bp, vnic,
- VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
}
-
- return 0;
+ return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);
err_reset:
netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
@@ -16013,10 +16077,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
- vnic->mru = 0;
- bnxt_hwrm_vnic_update(bp, vnic,
- VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+ bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
}
+ bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
/* Make sure NAPI sees that the VNIC is disabled */
synchronize_net();
rxr = &bp->rx_ring[idx];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 84c4812414fd..2450a369b792 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -231,10 +231,9 @@ void bnxt_ulp_stop(struct bnxt *bp)
return;
mutex_lock(&edev->en_dev_lock);
- if (!bnxt_ulp_registered(edev)) {
- mutex_unlock(&edev->en_dev_lock);
- return;
- }
+ if (!bnxt_ulp_registered(edev) ||
+ (edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_stop_exit;
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
if (aux_priv) {
@@ -250,6 +249,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
adrv->suspend(adev, pm);
}
}
+ulp_stop_exit:
mutex_unlock(&edev->en_dev_lock);
}
@@ -258,19 +258,13 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- if (!edev)
- return;
-
- edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
-
- if (err)
+ if (!edev || err)
return;
mutex_lock(&edev->en_dev_lock);
- if (!bnxt_ulp_registered(edev)) {
- mutex_unlock(&edev->en_dev_lock);
- return;
- }
+ if (!bnxt_ulp_registered(edev) ||
+ !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
+ goto ulp_start_exit;
if (edev->ulp_tbl->msix_requested)
bnxt_fill_msix_vecs(bp, edev->msix_entries);
@@ -287,6 +281,8 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
adrv->resume(adev);
}
}
+ulp_start_exit:
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
mutex_unlock(&edev->en_dev_lock);
}
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index c699bd6bcbb9..474073c7f94d 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -31,6 +31,7 @@ config FTGMAC100
depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
+ select FIXED_PHY
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index e917132d3714..54b0f0a5a6bb 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config FSL_ENETC_CORE
tristate
+ select NXP_NETC_LIB if NXP_NTMP
help
This module supports common functionality between the PF and VF
drivers for the NXP ENETC controller.
@@ -22,6 +23,9 @@ config NXP_NETC_LIB
Switch, such as NETC Table Management Protocol (NTMP) 2.0, common tc
flower and debugfs interfaces and so on.
+config NXP_NTMP
+ bool
+
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI_MSI
@@ -45,7 +49,7 @@ config NXP_ENETC4
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
select NXP_ENETC_PF_COMMON
- select NXP_NETC_LIB
+ select NXP_NTMP
select PHYLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 4098f01479bc..53e8d18c7a34 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -507,7 +507,7 @@ static inline u64 _enetc_rd_reg64(void __iomem *reg)
tmp = ioread32(reg + 4);
} while (high != tmp);
- return le64_to_cpu((__le64)high << 32 | low);
+ return (u64)high << 32 | low;
}
#endif
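
The enetc hunk above drops a spurious le64_to_cpu() on a value already assembled in CPU byte order from two ioread32() reads. For context, a hedged sketch of the usual high/low/high retry read of a 64-bit register split across two 32-bit MMIO words; the loop head is outside the hunk, so the exact enetc structure may differ.

        /* Sketch only: re-read until the high word is stable across the
         * low-word read, so a carry between the two words cannot be
         * observed half-done.
         */
        static inline u64 example_rd_reg64(void __iomem *reg)
        {
                u32 low, high, tmp;

                high = ioread32(reg + 4);
                do {
                        tmp = high;
                        low = ioread32(reg);
                        high = ioread32(reg + 4);
                } while (high != tmp);

                return (u64)high << 32 | low;   /* already CPU-endian */
        }
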
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3f089c3d47b2..d8595e84326d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -477,10 +477,6 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);
-
- /* Only kill reset task if adapter is not resetting */
- if (!test_bit(__E1000_RESETTING, &adapter->flags))
- cancel_work_sync(&adapter->reset_task);
}
void e1000_down(struct e1000_adapter *adapter)
@@ -1266,6 +1262,10 @@ static void e1000_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ /* Only kill reset task if adapter is not resetting */
+ if (!test_bit(__E1000_RESETTING, &adapter->flags))
+ cancel_work_sync(&adapter->reset_task);
+
e1000_phy_hw_reset(hw);
kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a96f4cfa6e17..7719e15813ee 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3534,9 +3534,6 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
@@ -3552,6 +3549,17 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
adapter->cc.shift = shift;
}
break;
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ /* System firmware can misreport this value, so set it to a
+ * stable 38400KHz frequency.
+ */
+ incperiod = INCPERIOD_38400KHZ;
+ incvalue = INCVALUE_38400KHZ;
+ shift = INCVALUE_SHIFT_38400KHZ;
+ adapter->cc.shift = shift;
+ break;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 89d57dd911dc..ea3c3eb2ef20 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -295,15 +295,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
- case e1000_pch_mtp:
- case e1000_pch_lnp:
- case e1000_pch_ptp:
case e1000_pch_nvp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)
adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ;
else
adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
break;
+ case e1000_pch_mtp:
+ case e1000_pch_lnp:
+ case e1000_pch_ptp:
+ adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ;
+ break;
case e1000_82574:
case e1000_82583:
adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 1120f8e4bb67..88e6bef69342 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1546,8 +1546,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * Returns true if the VF is in reset, resets successfully, or resets
- * are disabled and false otherwise.
+ * Return: True if reset was performed successfully or if resets are disabled.
+ * False if reset is already in progress.
**/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -1566,7 +1566,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
/* If VF is being reset already we don't need to continue. */
if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- return true;
+ return false;
i40e_trigger_vf_reset(vf, flr);
@@ -4328,7 +4328,10 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & BIT(bit_idx))
/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
- i40e_reset_vf(vf, true);
+ if (!i40e_reset_vf(vf, true)) {
+ /* At least one VF did not finish resetting, retry next time */
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ }
}
return 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 2c0bb41809a4..81d7249d1149 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3209,6 +3209,17 @@ static void iavf_reset_task(struct work_struct *work)
}
continue_reset:
+ /* If we are still early in the state machine, just restart. */
+ if (adapter->state <= __IAVF_INIT_FAILED) {
+ iavf_shutdown_adminq(hw);
+ iavf_change_state(adapter, __IAVF_STARTUP);
+ iavf_startup(adapter);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ netdev_unlock(netdev);
+ return;
+ }
+
/* We don't use netif_running() because it may be true prior to
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index a6f0e5990be2..07f0d0a0f1e2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -79,6 +79,23 @@ iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
return iavf_status_to_errno(status);
received_op =
(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
+
+ if (received_op == VIRTCHNL_OP_EVENT) {
+ struct iavf_adapter *adapter = hw->back;
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)event->msg_buf;
+
+ if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING)
+ continue;
+
+ dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
+ if (!(adapter->flags & IAVF_FLAG_RESET_PENDING))
+ iavf_schedule_reset(adapter,
+ IAVF_FLAG_RESET_PENDING);
+
+ return -EIO;
+ }
+
if (op_to_poll == received_op)
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 2bc5c7f59844..1f7834c03550 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -378,6 +378,50 @@ ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
}
/**
+ * ice_arfs_cmp - Check if aRFS filter matches this flow.
+ * @fltr_info: filter info of the saved ARFS entry.
+ * @fk: flow dissector keys.
+ * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6).
+ * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP.
+ *
+ * Since this function assumes limited values for n_proto and ip_proto, it
+ * is meant to be called only from ice_rx_flow_steer().
+ *
+ * Return:
+ * * true - fltr_info refers to the same flow as fk.
+ * * false - fltr_info and fk refer to different flows.
+ */
+static bool
+ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk,
+ __be16 n_proto, u8 ip_proto)
+{
+ /* Determine if the filter is for IPv4 or IPv6 based on flow_type,
+ * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}.
+ */
+ bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+
+ /* The checks are ordered with the quickest and most discriminative
+ * fields first so that mismatches fail early.
+ */
+ if (is_v4)
+ return n_proto == htons(ETH_P_IP) &&
+ fltr_info->ip.v4.src_port == fk->ports.src &&
+ fltr_info->ip.v4.dst_port == fk->ports.dst &&
+ fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src &&
+ fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst &&
+ fltr_info->ip.v4.proto == ip_proto;
+
+ return fltr_info->ip.v6.src_port == fk->ports.src &&
+ fltr_info->ip.v6.dst_port == fk->ports.dst &&
+ fltr_info->ip.v6.proto == ip_proto &&
+ !memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr)) &&
+ !memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+}
+
+/**
* ice_rx_flow_steer - steer the Rx flow to where application is being run
* @netdev: ptr to the netdev being adjusted
* @skb: buffer with required header information
@@ -448,6 +492,10 @@ ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
continue;
fltr_info = &arfs_entry->fltr_info;
+
+ if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto))
+ continue;
+
ret = fltr_info->fltr_id;
if (fltr_info->q_index == rxq_idx ||
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 6aae03771746..2e4f0969035f 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -508,10 +508,14 @@ err_create_repr:
*/
int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr = ice_repr_create_vf(vf);
struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_repr *repr;
int err;
+ if (!ice_is_eswitch_mode_switchdev(pf))
+ return 0;
+
+ repr = ice_repr_create_vf(vf);
if (IS_ERR(repr))
return PTR_ERR(repr);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index b79a148ed0f2..55cad824c5b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -2299,6 +2299,7 @@ static int ice_capture_crosststamp(ktime_t *device,
ts = ((u64)ts_hi << 32) | ts_lo;
system->cycles = ts;
system->cs_id = CSID_X86_ART;
+ system->use_nsecs = true;
/* Read Device source clock time */
ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 6f572589f1e5..6b5c9536d26d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1822,7 +1822,7 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
req->bpid_per_chan = 1;
} else {
- req->chan_cnt = 1;
+ req->chan_cnt = pfvf->hw.rx_chan_cnt;
req->bpid_per_chan = 0;
}
@@ -1847,7 +1847,7 @@ int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable)
req->chan_cnt = IEEE_8021QAZ_MAX_TCS;
req->bpid_per_chan = 1;
} else {
- req->chan_cnt = 1;
+ req->chan_cnt = pfvf->hw.rx_chan_cnt;
req->bpid_per_chan = 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index ea078c9f5d15..3cb8d3bf9044 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -43,7 +43,6 @@
#include "en/fs_ethtool.h"
#define LANES_UNKNOWN 0
-#define MAX_LANES 8
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -1098,10 +1097,8 @@ static void get_link_properties(struct net_device *netdev,
speed = info->speed;
lanes = info->lanes;
duplex = DUPLEX_FULL;
- } else if (data_rate_oper) {
+ } else if (data_rate_oper)
speed = 100 * data_rate_oper;
- lanes = MAX_LANES;
- }
out:
link_ksettings->base.duplex = duplex;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f1d908f61134..fef418e1ed1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2028,9 +2028,8 @@ err_out:
return err;
}
-static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
+static bool mlx5_flow_has_geneve_opt(struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
@@ -2069,7 +2068,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
}
complete_all(&flow->del_hw_done);
- if (mlx5_flow_has_geneve_opt(flow))
+ if (mlx5_flow_has_geneve_opt(&attr->parse_attr->spec))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
if (flow->decap_route)
@@ -2574,12 +2573,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
if (err) {
- kvfree(tmp_spec);
NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
- return err;
+ } else {
+ err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
}
- err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
+ if (mlx5_flow_has_geneve_opt(tmp_spec))
+ mlx5_geneve_tlv_option_del(priv->mdev->geneve);
kvfree(tmp_spec);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7fb8a3381f84..4917d185d0c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1295,12 +1295,15 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
- enabled_events);
- if (ret)
- goto ec_vf_err;
- }
+ }
+
+ /* Enable ECVF vports */
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ ret = mlx5_eswitch_load_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs,
+ enabled_events);
+ if (ret)
+ goto ec_vf_err;
}
/* Enable VF vports */
@@ -1331,9 +1334,11 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_eswitch_unload_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs);
+
if (mlx5_ecpf_vport_exists(esw->dev)) {
- if (mlx5_core_ec_sriov_enabled(esw->dev))
- mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 23a7e8e7adfa..a8046200d376 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2228,6 +2228,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct mlx5_flow_handle *rule;
struct match_list *iter;
bool take_write = false;
+ bool try_again = false;
struct fs_fte *fte;
u64 version = 0;
int err;
@@ -2292,6 +2293,7 @@ skip_search:
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
if (!g->node.active) {
+ try_again = true;
up_write_ref_node(&g->node, false);
continue;
}
@@ -2313,7 +2315,8 @@ skip_search:
tree_put_node(&fte->node, false);
return rule;
}
- rule = ERR_PTR(-ENOENT);
+ err = try_again ? -EAGAIN : -ENOENT;
+ rule = ERR_PTR(err);
out:
kmem_cache_free(steering->ftes_cache, fte);
return rule;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 972e8e9df585..9bc9bd83c232 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -291,7 +291,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
- int nid = dev_to_node(device);
+ int nid = dev->priv.numa_node;
struct page *page;
u64 zero_addr = 1;
u64 addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index fb62f3bc4bd4..447ea3f8722c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -1370,8 +1370,8 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
struct mlx5hws_cmd_forward_tbl *fw_island;
struct mlx5hws_action *action;
- u32 i /*, packet_reformat_id*/;
- int ret;
+ int ret, last_dest_idx = -1;
+ u32 i;
if (num_dest <= 1) {
mlx5hws_err(ctx, "Action must have multiple dests\n");
@@ -1401,11 +1401,8 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = ignore_flow_level;
- /* ToDo: In SW steering we have a handling of 'go to WIRE'
- * destination here by upper layer setting 'is_wire_ft' flag
- * if the destination is wire.
- * This is because uplink should be last dest in the list.
- */
+ if (dests[i].is_wire_ft)
+ last_dest_idx = i;
break;
case MLX5HWS_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
@@ -1429,6 +1426,9 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
}
}
+ if (last_dest_idx != -1)
+ swap(dest_list[last_dest_idx], dest_list[num_dest - 1]);
+
fte_attr.dests_num = num_dest;
fte_attr.dests = dest_list;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
index 70768953a4f6..ca7501c57468 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -1070,7 +1070,7 @@ hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
struct mlx5hws_bwc_complex_rule_hash_node *node, *old_node;
struct rhashtable *refcount_hash;
- int i;
+ int ret, i;
bwc_rule->complex_hash_node = NULL;
@@ -1078,7 +1078,11 @@ hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(!node))
return -ENOMEM;
- node->tag = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL);
+ ret = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_free_node;
+ node->tag = ret;
+
refcount_set(&node->refcount, 1);
/* Clear match buffer - turn off all the unrelated fields
@@ -1094,6 +1098,11 @@ hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
old_node = rhashtable_lookup_get_insert_fast(refcount_hash,
&node->hash_node,
hws_refcount_hash);
+ if (IS_ERR(old_node)) {
+ ret = PTR_ERR(old_node);
+ goto err_free_ida;
+ }
+
if (old_node) {
/* Rule with the same tag already exists - update refcount */
refcount_inc(&old_node->refcount);
@@ -1112,6 +1121,12 @@ hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
bwc_rule->complex_hash_node = node;
return 0;
+
+err_free_ida:
+ ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
+err_free_node:
+ kfree(node);
+ return ret;
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 5cc0dc002ac1..d45e1145d197 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -785,6 +785,9 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
outer_headers.ip_protocol,
eth_l3_outer.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_O,
+ outer_headers.ip_version,
+ eth_l3_outer.ip_version);
HWS_SET_HDR(fc, match_param, IP_TTL_O,
outer_headers.ttl_hoplimit,
eth_l3_outer.time_to_live_hop_limit);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index 9d1c0e4b224a..bf4643d0ce17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -966,6 +966,9 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
switch (attr->type) {
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
+ if (dst->dest_attr.ft->flags &
+ MLX5_FLOW_TABLE_UPLINK_VPORT)
+ dest_actions[num_dest_actions].is_wire_ft = true;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
@@ -1357,6 +1360,7 @@ mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
pkt_reformat->fs_hws_action.pr_data = pr_data;
}
+ mutex_init(&pkt_reformat->fs_hws_action.lock);
pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
pkt_reformat->fs_hws_action.hws_action = hws_action;
return 0;
@@ -1503,7 +1507,6 @@ static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
err = -ENOMEM;
goto release_mh;
}
- mutex_init(&modify_hdr->fs_hws_action.lock);
modify_hdr->fs_hws_action.mh_data = mh_data;
modify_hdr->fs_hws_action.fs_pool = pool;
modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index 9bbadc4d8a0b..d8ac6c196211 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -213,6 +213,7 @@ struct mlx5hws_action_dest_attr {
struct mlx5hws_action *dest;
/* Optional reformat action */
struct mlx5hws_action *reformat;
+ bool is_wire_ft;
};
/**
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index fb2e5b844c15..d76d7a945899 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -447,8 +447,10 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0);
- if (phy_irq < 0) {
- dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
+ if (phy_irq == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto out;
+ } else if (phy_irq < 0) {
phy_irq = PHY_POLL;
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index e2368075ab8c..4521d0483d18 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -127,11 +127,8 @@ static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
return -EBUSY;
addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
- if (dma_mapping_error(fbd->dev, addr)) {
- free_page((unsigned long)msg);
-
+ if (dma_mapping_error(fbd->dev, addr))
return -ENOSPC;
- }
mbx->buf_info[tail].msg = msg;
mbx->buf_info[tail].addr = addr;
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index e8d073bfa2ca..f33dc83c5700 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -18,9 +18,9 @@
*/
#define LAN743X_PTP_N_EVENT_CHAN 2
#define LAN743X_PTP_N_PEROUT LAN743X_PTP_N_EVENT_CHAN
-#define LAN743X_PTP_N_EXTTS 4
-#define LAN743X_PTP_N_PPS 0
#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
+#define LAN743X_PTP_N_EXTTS PCI11X1X_PTP_IO_MAX_CHANNELS
+#define LAN743X_PTP_N_PPS 0
#define PTP_CMD_CTL_TIMEOUT_CNT 50
struct lan743x_adapter;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 3504507477c6..52cf7112762c 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -31,6 +31,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev)
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+ gc->phys_db_page_base = gc->bar0_pa +
+ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
sriov_base_va = gc->bar0_va + sriov_base_off;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index daf1e82cb76b..0e60a6bef99a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -516,9 +516,9 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
unsigned long start_time;
unsigned long max_wait;
unsigned long duration;
- int done = 0;
bool fw_up;
int opcode;
+ bool done;
int err;
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
@@ -526,6 +526,7 @@ static int __ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds,
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
+ done = false;
opcode = idev->opcode;
start_time = jiffies;
for (fw_up = ionic_is_fw_running(idev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 2ac59564ded1..d10b58ebf603 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -321,7 +321,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
len, DMA_TO_DEVICE);
} else /* XDP_REDIRECT */ {
dma_addr = ionic_tx_map_single(q, frame->data, len);
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
}
@@ -357,7 +357,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
} else {
dma_addr = ionic_tx_map_frag(q, frag, 0,
skb_frag_size(frag));
- if (dma_mapping_error(q->dev, dma_addr)) {
+ if (dma_addr == DMA_MAPPING_ERROR) {
ionic_tx_desc_unmap_bufs(q, desc_info);
return -EIO;
}
@@ -1083,7 +1083,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1100,7 +1100,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -1116,7 +1116,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
int frag_idx;
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
return -EIO;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_headlen(skb);
@@ -1126,7 +1126,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (!dma_addr)
+ if (dma_addr == DMA_MAPPING_ERROR)
goto dma_fail;
buf_info->dma_addr = dma_addr;
buf_info->len = skb_frag_size(frag);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
index f55eed092f25..7d78f072b0a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -242,7 +242,7 @@ static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
}
/* Returns size of the data buffer or, -1 in case TLV data is not available. */
-static int
+static noinline_for_stack int
qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_generic *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -304,7 +304,7 @@ qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
return -1;
}
-static int
+static noinline_for_stack int
qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_eth *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -438,7 +438,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
return QED_MFW_TLV_TIME_SIZE;
}
-static int
+static noinline_for_stack int
qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_fcoe *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
@@ -1073,7 +1073,7 @@ qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
return -1;
}
-static int
+static noinline_for_stack int
qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
struct qed_mfw_tlv_iscsi *p_drv_buf,
struct qed_tlv_parsed_buf *p_buf)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 5b8fdb882172..12f25cec6255 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -98,20 +98,11 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
{
struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma;
- struct prueth_swdata *swdata;
- struct page *page;
u32 buf_dma_len;
first_desc = desc;
next_desc = first_desc;
- swdata = cppi5_hdesc_get_swdata(desc);
- if (swdata->type == PRUETH_SWDATA_PAGE) {
- page = swdata->data.page;
- page_pool_recycle_direct(page->pp, swdata->data.page);
- goto free_desc;
- }
-
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@@ -135,7 +126,6 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
-free_desc:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -612,13 +602,8 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- if (page) {
- swdata->type = PRUETH_SWDATA_PAGE;
- swdata->data.page = page;
- } else {
- swdata->type = PRUETH_SWDATA_XDPF;
- swdata->data.xdpf = xdpf;
- }
+ swdata->type = PRUETH_SWDATA_XDPF;
+ swdata->data.xdpf = xdpf;
/* Report BQL before sending the packet */
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 7f2e6cddfeb1..c57cc4f27249 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -2623,7 +2623,7 @@ static int wx_alloc_page_pool(struct wx_ring *rx_ring)
struct page_pool_params pp_params = {
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = 0,
- .pool_size = rx_ring->size,
+ .pool_size = rx_ring->count,
.nid = dev_to_node(rx_ring->dev),
.dev = rx_ring->dev,
.dma_dir = DMA_FROM_DEVICE,
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3d315e30ee47..7edbe76b5455 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -247,15 +247,39 @@ static sci_t make_sci(const u8 *addr, __be16 port)
return sci;
}
-static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
+static sci_t macsec_active_sci(struct macsec_secy *secy)
{
- sci_t sci;
+ struct macsec_rx_sc *rx_sc = rcu_dereference_bh(secy->rx_sc);
+
+ /* Case single RX SC */
+ if (rx_sc && !rcu_dereference_bh(rx_sc->next))
+ return (rx_sc->active) ? rx_sc->sci : 0;
+ /* Case no RX SC or multiple */
+ else
+ return 0;
+}
+
+static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present,
+ struct macsec_rxh_data *rxd)
+{
+ struct macsec_dev *macsec;
+ sci_t sci = 0;
- if (sci_present)
+ /* SC = 1 */
+ if (sci_present) {
memcpy(&sci, hdr->secure_channel_id,
sizeof(hdr->secure_channel_id));
- else
+ /* SC = 0; ES = 0 */
+ } else if ((!(hdr->tci_an & (MACSEC_TCI_ES | MACSEC_TCI_SC))) &&
+ (list_is_singular(&rxd->secys))) {
+ /* Only one SECY should exist in this scenario */
+ macsec = list_first_or_null_rcu(&rxd->secys, struct macsec_dev,
+ secys);
+ if (macsec)
+ return macsec_active_sci(&macsec->secy);
+ } else {
sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
+ }
return sci;
}
@@ -1109,7 +1133,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
unsigned int len;
- sci_t sci;
+ sci_t sci = 0;
u32 hdr_pn;
bool cbit;
struct pcpu_rx_sc_stats *rxsc_stats;
@@ -1156,11 +1180,14 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
- sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
+ sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci, rxd);
+ if (!sci)
+ goto drop_nosc;
+
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
@@ -1283,6 +1310,7 @@ drop:
macsec_rxsa_put(rx_sa);
drop_nosa:
macsec_rxsc_put(rx_sc);
+drop_nosc:
rcu_read_unlock();
drop_direct:
kfree_skb(skb);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4289ccd3e41b..176935a8645f 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -1252,7 +1252,6 @@ static int sysdata_append_release(struct netconsole_target *nt, int offset)
*/
static int prepare_extradata(struct netconsole_target *nt)
{
- u32 fields = SYSDATA_CPU_NR | SYSDATA_TASKNAME;
int extradata_len;
/* userdata was appended when configfs write helper was called
@@ -1260,7 +1259,7 @@ static int prepare_extradata(struct netconsole_target *nt)
*/
extradata_len = nt->userdata_length;
- if (!(nt->sysdata_fields & fields))
+ if (!nt->sysdata_fields)
goto out;
if (nt->sysdata_fields & SYSDATA_CPU_NR)
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index af545d42961c..fa5fbd97ad69 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -371,7 +371,8 @@ static int nsim_poll(struct napi_struct *napi, int budget)
int done;
done = nsim_rcv(rq, budget);
- napi_complete(napi);
+ if (done < budget)
+ napi_complete_done(napi, done);
return done;
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index a6bcb0fee863..fda2e27c1810 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -445,6 +445,9 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->read)
retval = bus->read(bus, addr, regnum);
else
@@ -474,6 +477,9 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->write)
err = bus->write(bus, addr, regnum, val);
else
@@ -535,6 +541,9 @@ int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->read_c45)
retval = bus->read_c45(bus, addr, devad, regnum);
else
@@ -566,6 +575,9 @@ int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->write_c45)
err = bus->write_c45(bus, addr, devad, regnum, val);
else
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
index 703321689726..38417e288611 100644
--- a/drivers/net/phy/phy_caps.c
+++ b/drivers/net/phy/phy_caps.c
@@ -188,6 +188,9 @@ phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only)
* When @exact is not set, we return either an exact match, or matching capabilities
* at lower speed, or the lowest matching speed, or NULL.
*
+ * Non-exact matches will try to return an exact speed and duplex match, but may
+ * return matching capabilities with the same speed but a different duplex.
+ *
* Returns: a matched link_capabilities according to the above process, NULL
* otherwise.
*/
@@ -195,7 +198,7 @@ const struct link_capabilities *
phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
bool exact)
{
- const struct link_capabilities *lcap, *last = NULL;
+ const struct link_capabilities *lcap, *match = NULL, *last = NULL;
for_each_link_caps_desc_speed(lcap) {
if (linkmode_intersects(lcap->linkmodes, supported)) {
@@ -204,16 +207,19 @@ phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
if (lcap->speed == speed && lcap->duplex == duplex) {
return lcap;
} else if (!exact) {
- if (lcap->speed <= speed)
- return lcap;
+ if (!match && lcap->speed <= speed)
+ match = lcap;
+
+ if (lcap->speed < speed)
+ break;
}
}
}
- if (!exact)
- return last;
+ if (!match && !exact)
+ match = last;
- return NULL;
+ return match;
}
EXPORT_SYMBOL_GPL(phy_caps_lookup);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b586b1c13a47..f5647ee0adde 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1426,6 +1426,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
+ {QMI_QUIRK_SET_DTR(0x1e0e, 0x9071, 3)}, /* SIMCom 8230C ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d6589b24c68d..44cba7acfe7d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10054,6 +10054,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
+ { USB_DEVICE(VENDOR_ID_TPLINK, 0x0602) },
{ USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
{ USB_DEVICE(VENDOR_ID_DELL, 0xb097) },
{ USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e58a0f1b5c5b..a3046142cb8e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -909,7 +909,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
/* NAPI functions as RCU section */
peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
- peer_txq = netdev_get_tx_queue(peer_dev, queue_idx);
+ peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
@@ -959,7 +959,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
rq->stats.vs.xdp_packets += done;
u64_stats_update_end(&rq->stats.syncp);
- if (unlikely(netif_tx_queue_stopped(peer_txq)))
+ if (peer_txq && unlikely(netif_tx_queue_stopped(peer_txq)))
netif_tx_wake_queue(peer_txq);
return done;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 8c7ffea0fa44..07fe05384cdf 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "mac.h"
@@ -1022,6 +1023,26 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
return ar->last_wmi_vdev_start_status;
}
+static inline int ath10k_vdev_delete_sync(struct ath10k *ar)
+{
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map))
+ return 0;
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH10K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
struct cfg80211_chan_def *chandef = NULL;
@@ -5900,7 +5921,6 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_peer *peer;
- unsigned long time_left;
int ret;
int i;
@@ -5940,13 +5960,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
arvif->vdev_id, ret);
- if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
- time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
- ATH10K_VDEV_DELETE_TIMEOUT_HZ);
- if (time_left == 0) {
- ath10k_warn(ar, "Timeout in receiving vdev delete response\n");
- goto out;
- }
+ ret = ath10k_vdev_delete_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "Error in receiving vdev delete response: %d\n", ret);
+ goto out;
}
/* Some firmware revisions don't notify host about self-peer removal
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index b2bf9d72b92f..d51f2e5a79a4 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -938,7 +938,9 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
dev_set_threaded(ar->napi_dev, true);
ath10k_core_napi_enable(ar);
- ath10k_snoc_irq_enable(ar);
+ /* IRQs are left enabled when we restart due to a firmware crash */
+ if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
+ ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 2e9f8a5e61e4..22a101136135 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -990,6 +990,7 @@ void ath11k_fw_stats_init(struct ath11k *ar)
INIT_LIST_HEAD(&ar->fw_stats.bcn);
init_completion(&ar->fw_stats_complete);
+ init_completion(&ar->fw_stats_done);
}
void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
@@ -2134,6 +2135,20 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
+ switch (ath11k_crypto_mode) {
+ case ATH11K_CRYPT_MODE_SW:
+ set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ case ATH11K_CRYPT_MODE_HW:
+ clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ default:
+ ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
+ return -EINVAL;
+ }
+
ret = ath11k_core_start_firmware(ab, ab->fw_mode);
if (ret) {
ath11k_err(ab, "failed to start firmware: %d\n", ret);
@@ -2152,20 +2167,6 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
goto err_firmware_stop;
}
- switch (ath11k_crypto_mode) {
- case ATH11K_CRYPT_MODE_SW:
- set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
- set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
- break;
- case ATH11K_CRYPT_MODE_HW:
- clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
- clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
- break;
- default:
- ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
- return -EINVAL;
- }
-
if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW)
set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 339d4fca1ed5..6b2f207975e3 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -600,6 +600,8 @@ struct ath11k_fw_stats {
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
+ u32 num_vdev_recvd;
+ u32 num_bcn_recvd;
};
struct ath11k_dbg_htt_stats {
@@ -784,7 +786,7 @@ struct ath11k {
u8 alpha2[REG_ALPHA2_LEN + 1];
struct ath11k_fw_stats fw_stats;
struct completion fw_stats_complete;
- bool fw_stats_done;
+ struct completion fw_stats_done;
/* protected by conf_mutex */
bool ps_state_enable;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index bf192529e3fe..5d46f8e4c231 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -93,57 +93,14 @@ void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
spin_unlock_bh(&dbr_data->lock);
}
-static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
-{
- spin_lock_bh(&ar->data_lock);
- ar->fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
- ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
- spin_unlock_bh(&ar->data_lock);
-}
-
void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
{
struct ath11k_base *ab = ar->ab;
- struct ath11k_pdev *pdev;
- bool is_end;
- static unsigned int num_vdev, num_bcn;
- size_t total_vdevs_started = 0;
- int i;
-
- /* WMI_REQUEST_PDEV_STAT request has been already processed */
-
- if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
- ar->fw_stats_done = true;
- return;
- }
-
- if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats->vdevs)) {
- ath11k_warn(ab, "empty vdev stats");
- return;
- }
- /* FW sends all the active VDEV stats irrespective of PDEV,
- * hence limit until the count of all VDEVs started
- */
- for (i = 0; i < ab->num_radios; i++) {
- pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar)
- total_vdevs_started += ar->num_started_vdevs;
- }
-
- is_end = ((++num_vdev) == total_vdevs_started);
-
- list_splice_tail_init(&stats->vdevs,
- &ar->fw_stats.vdevs);
-
- if (is_end) {
- ar->fw_stats_done = true;
- num_vdev = 0;
- }
- return;
- }
+ bool is_end = true;
+ /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_RSSI_PER_CHAIN_STAT and
+ * WMI_REQUEST_VDEV_STAT requests have already been processed.
+ */
if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
if (list_empty(&stats->bcn)) {
ath11k_warn(ab, "empty bcn stats");
@@ -152,97 +109,18 @@ void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
- is_end = ((++num_bcn) == ar->num_started_vdevs);
+ if (ar->num_started_vdevs)
+ is_end = ((++ar->fw_stats.num_bcn_recvd) ==
+ ar->num_started_vdevs);
list_splice_tail_init(&stats->bcn,
&ar->fw_stats.bcn);
- if (is_end) {
- ar->fw_stats_done = true;
- num_bcn = 0;
- }
+ if (is_end)
+ complete(&ar->fw_stats_done);
}
}
-static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
- struct stats_request_params *req_param)
-{
- struct ath11k_base *ab = ar->ab;
- unsigned long timeout, time_left;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- /* FW stats can get split when exceeding the stats data buffer limit.
- * In that case, since there is no end marking for the back-to-back
- * received 'update stats' event, we keep a 3 seconds timeout in case,
- * fw_stats_done is not marked yet
- */
- timeout = jiffies + secs_to_jiffies(3);
-
- ath11k_debugfs_fw_stats_reset(ar);
-
- reinit_completion(&ar->fw_stats_complete);
-
- ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
-
- if (ret) {
- ath11k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
-
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
-
- if (!time_left)
- return -ETIMEDOUT;
-
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
-
- spin_lock_bh(&ar->data_lock);
- if (ar->fw_stats_done) {
- spin_unlock_bh(&ar->data_lock);
- break;
- }
- spin_unlock_bh(&ar->data_lock);
- }
- return 0;
-}
-
-int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
- u32 vdev_id, u32 stats_id)
-{
- struct ath11k_base *ab = ar->ab;
- struct stats_request_params req_param;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto err_unlock;
- }
-
- req_param.pdev_id = pdev_id;
- req_param.vdev_id = vdev_id;
- req_param.stats_id = stats_id;
-
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
- if (ret)
- ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
-
- ath11k_dbg(ab, ATH11K_DBG_WMI,
- "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
- pdev_id, vdev_id, stats_id);
-
-err_unlock:
- mutex_unlock(&ar->conf_mutex);
-
- return ret;
-}
-
static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
{
struct ath11k *ar = inode->i_private;
@@ -268,7 +146,7 @@ static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
req_param.vdev_id = 0;
req_param.stats_id = WMI_REQUEST_PDEV_STAT;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
goto err_free;
@@ -339,7 +217,7 @@ static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
req_param.vdev_id = 0;
req_param.stats_id = WMI_REQUEST_VDEV_STAT;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
goto err_free;
@@ -415,7 +293,7 @@ static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
continue;
req_param.vdev_id = arvif->vdev_id;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
goto err_free;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index a39e458637b0..ed7fec177588 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_DEBUGFS_H_
@@ -273,8 +273,6 @@ void ath11k_debugfs_unregister(struct ath11k *ar);
void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
-int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
- u32 vdev_id, u32 stats_id);
static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
{
@@ -381,12 +379,6 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return 0;
}
-static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
- u32 pdev_id, u32 vdev_id, u32 stats_id)
-{
- return 0;
-}
-
static inline void
ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 08d7b136851f..13301ca317a5 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -8997,6 +8997,81 @@ static void ath11k_mac_put_chain_rssi(struct station_info *sinfo,
}
}
+static void ath11k_mac_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
+ ar->fw_stats.num_vdev_recvd = 0;
+ ar->fw_stats.num_bcn_recvd = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+int ath11k_mac_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_mac_fw_stats_reset(ar);
+
+ reinit_completion(&ar->fw_stats_complete);
+ reinit_completion(&ar->fw_stats_done);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ /* FW stats can get split when exceeding the stats data buffer limit.
+ * In that case, since there is no end marking for the back-to-back
+ * received 'update stats' event, we keep a 3 second timeout in case
+ * fw_stats_done is not marked yet
+ */
+ time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return -ENETDOWN;
+
+ req_param.pdev_id = pdev_id;
+ req_param.vdev_id = vdev_id;
+ req_param.stats_id = stats_id;
+
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ if (ret)
+ ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ pdev_id, vdev_id, stats_id);
+
+ return ret;
+}
+
static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -9031,11 +9106,12 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false);
+ mutex_lock(&ar->conf_mutex);
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
ar->ab->hw_params.supports_rssi_stats &&
- !ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
- WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
+ !ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true);
}
@@ -9043,9 +9119,10 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
if (!signal &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
ar->ab->hw_params.supports_rssi_stats &&
- !(ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
- WMI_REQUEST_VDEV_STAT)))
+ !(ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_VDEV_STAT)))
signal = arsta->rssi_beacon;
+ mutex_unlock(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"sta statistics db2dbm %u rssi comb %d rssi beacon %d\n",
@@ -9380,38 +9457,6 @@ exit:
return ret;
}
-static int ath11k_fw_stats_request(struct ath11k *ar,
- struct stats_request_params *req_param)
-{
- struct ath11k_base *ab = ar->ab;
- unsigned long time_left;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- spin_lock_bh(&ar->data_lock);
- ar->fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
- spin_unlock_bh(&ar->data_lock);
-
- reinit_completion(&ar->fw_stats_complete);
-
- ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
- if (ret) {
- ath11k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
-
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
- 1 * HZ);
-
- if (!time_left)
- return -ETIMEDOUT;
-
- return 0;
-}
-
static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id,
@@ -9419,7 +9464,6 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
- struct stats_request_params req_param = {0};
struct ath11k_fw_stats_pdev *pdev;
int ret;
@@ -9431,9 +9475,6 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
*/
mutex_lock(&ar->conf_mutex);
- if (ar->state != ATH11K_STATE_ON)
- goto err_fallback;
-
/* Firmware doesn't provide Tx power during CAC hence no need to fetch
* the stats.
*/
@@ -9442,10 +9483,8 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
return -EAGAIN;
}
- req_param.pdev_id = ar->pdev->pdev_id;
- req_param.stats_id = WMI_REQUEST_PDEV_STAT;
-
- ret = ath11k_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_PDEV_STAT);
if (ret) {
ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
goto err_fallback;
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index f5800fbecff8..5e61eea1bb03 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_MAC_H
@@ -179,4 +179,6 @@ int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *ctx);
+int ath11k_mac_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index d7f852bebf4a..56af2e9634f4 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -8158,6 +8158,11 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k_fw_stats stats = {};
+ size_t total_vdevs_started = 0;
+ struct ath11k_pdev *pdev;
+ bool is_end = true;
+ int i;
+
struct ath11k *ar;
int ret;
@@ -8184,25 +8189,57 @@ static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *sk
spin_lock_bh(&ar->data_lock);
- /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
+ /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and
+ * WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via
* debugfs fw stats. Therefore, processing it separately.
*/
if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
- ar->fw_stats_done = true;
+ complete(&ar->fw_stats_done);
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ complete(&ar->fw_stats_done);
goto complete;
}
- /* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
- * are currently requested only via debugfs fw stats. Hence, processing these
- * in debugfs context
+ if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats.vdevs)) {
+ ath11k_warn(ab, "empty vdev stats");
+ goto complete;
+ }
+ /* FW sends all the active VDEV stats irrespective of PDEV,
+ * hence limit until the count of all VDEVs started
+ */
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += ar->num_started_vdevs;
+ }
+
+ if (total_vdevs_started)
+ is_end = ((++ar->fw_stats.num_vdev_recvd) ==
+ total_vdevs_started);
+
+ list_splice_tail_init(&stats.vdevs,
+ &ar->fw_stats.vdevs);
+
+ if (is_end)
+ complete(&ar->fw_stats_done);
+
+ goto complete;
+ }
+
+ /* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats.
+ * Hence, processing it in debugfs context
*/
ath11k_debugfs_fw_stats_process(ar, &stats);
complete:
complete(&ar->fw_stats_complete);
- rcu_read_unlock();
spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
/* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
* at this point, no need to free the individual list.
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 31d851d8e688..89ae80934b30 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1216,6 +1216,7 @@ void ath12k_fw_stats_init(struct ath12k *ar)
INIT_LIST_HEAD(&ar->fw_stats.pdevs);
INIT_LIST_HEAD(&ar->fw_stats.bcn);
init_completion(&ar->fw_stats_complete);
+ init_completion(&ar->fw_stats_done);
}
void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
@@ -1228,8 +1229,9 @@ void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
void ath12k_fw_stats_reset(struct ath12k *ar)
{
spin_lock_bh(&ar->data_lock);
- ar->fw_stats.fw_stats_done = false;
ath12k_fw_stats_free(&ar->fw_stats);
+ ar->fw_stats.num_vdev_recvd = 0;
+ ar->fw_stats.num_bcn_recvd = 0;
spin_unlock_bh(&ar->data_lock);
}
@@ -2129,7 +2131,8 @@ int ath12k_core_init(struct ath12k_base *ab)
if (!ag) {
mutex_unlock(&ath12k_hw_group_mutex);
ath12k_warn(ab, "unable to get hw group\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_unregister_notifier;
}
mutex_unlock(&ath12k_hw_group_mutex);
@@ -2144,7 +2147,7 @@ int ath12k_core_init(struct ath12k_base *ab)
if (ret) {
mutex_unlock(&ag->mutex);
ath12k_warn(ab, "unable to create hw group\n");
- goto err;
+ goto err_destroy_hw_group;
}
}
@@ -2152,9 +2155,12 @@ int ath12k_core_init(struct ath12k_base *ab)
return 0;
-err:
+err_destroy_hw_group:
ath12k_core_hw_group_destroy(ab->ag);
ath12k_core_hw_group_unassign(ab);
+err_unregister_notifier:
+ ath12k_core_panic_notifier_unregister(ab);
+
return ret;
}
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 941db6e49d6e..7bcd9c70309f 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -601,6 +601,12 @@ struct ath12k_sta {
#define ATH12K_NUM_CHANS 101
#define ATH12K_MAX_5GHZ_CHAN 173
+static inline bool ath12k_is_2ghz_channel_freq(u32 freq)
+{
+ return freq >= ATH12K_MIN_2GHZ_FREQ &&
+ freq <= ATH12K_MAX_2GHZ_FREQ;
+}
+
enum ath12k_hw_state {
ATH12K_HW_STATE_OFF,
ATH12K_HW_STATE_ON,
@@ -626,7 +632,8 @@ struct ath12k_fw_stats {
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
- bool fw_stats_done;
+ u32 num_vdev_recvd;
+ u32 num_bcn_recvd;
};
struct ath12k_dbg_htt_stats {
@@ -806,6 +813,7 @@ struct ath12k {
bool regdom_set_by_user;
struct completion fw_stats_complete;
+ struct completion fw_stats_done;
struct completion mlo_setup_done;
u32 mlo_setup_status;
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
index dd624d73b8b2..23da93afaa5c 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -1251,64 +1251,6 @@ void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
*/
}
-void
-ath12k_debugfs_fw_stats_process(struct ath12k *ar,
- struct ath12k_fw_stats *stats)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_pdev *pdev;
- bool is_end;
- static unsigned int num_vdev, num_bcn;
- size_t total_vdevs_started = 0;
- int i;
-
- if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats->vdevs)) {
- ath12k_warn(ab, "empty vdev stats");
- return;
- }
- /* FW sends all the active VDEV stats irrespective of PDEV,
- * hence limit until the count of all VDEVs started
- */
- rcu_read_lock();
- for (i = 0; i < ab->num_radios; i++) {
- pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar)
- total_vdevs_started += pdev->ar->num_started_vdevs;
- }
- rcu_read_unlock();
-
- is_end = ((++num_vdev) == total_vdevs_started);
-
- list_splice_tail_init(&stats->vdevs,
- &ar->fw_stats.vdevs);
-
- if (is_end) {
- ar->fw_stats.fw_stats_done = true;
- num_vdev = 0;
- }
- return;
- }
- if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
- if (list_empty(&stats->bcn)) {
- ath12k_warn(ab, "empty beacon stats");
- return;
- }
- /* Mark end until we reached the count of all started VDEVs
- * within the PDEV
- */
- is_end = ((++num_bcn) == ar->num_started_vdevs);
-
- list_splice_tail_init(&stats->bcn,
- &ar->fw_stats.bcn);
-
- if (is_end) {
- ar->fw_stats.fw_stats_done = true;
- num_bcn = 0;
- }
- }
-}
-
static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
{
struct ath12k *ar = inode->i_private;
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
index ebef7dace344..21641a8a0346 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -12,8 +12,6 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab);
void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
void ath12k_debugfs_register(struct ath12k *ar);
void ath12k_debugfs_unregister(struct ath12k *ar);
-void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
- struct ath12k_fw_stats *stats);
void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void ath12k_debugfs_pdev_create(struct ath12k_base *ab);
@@ -126,11 +124,6 @@ static inline void ath12k_debugfs_unregister(struct ath12k *ar)
{
}
-static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
- struct ath12k_fw_stats *stats)
-{
-}
-
static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
{
return false;
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index 0ee9c6b26dab..c1750b5dc03c 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -585,7 +585,8 @@ enum hal_reo_cmd_type {
* or cache was blocked
* @HAL_REO_CMD_FAILED: Command execution failed, could be due to
* invalid queue desc
- * @HAL_REO_CMD_RESOURCE_BLOCKED:
+ * @HAL_REO_CMD_RESOURCE_BLOCKED: Command could not be executed because
+ * one or more descriptors were blocked
* @HAL_REO_CMD_DRAIN:
*/
enum hal_reo_cmd_status {
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index 7e2cf0fb2085..8254dc10b53b 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -951,6 +951,8 @@ static const struct ath12k_hw_regs qcn9274_v1_regs = {
.hal_umac_ce0_dest_reg_base = 0x01b81000,
.hal_umac_ce1_src_reg_base = 0x01b82000,
.hal_umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e38338,
};
static const struct ath12k_hw_regs qcn9274_v2_regs = {
@@ -1042,6 +1044,8 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = {
.hal_umac_ce0_dest_reg_base = 0x01b81000,
.hal_umac_ce1_src_reg_base = 0x01b82000,
.hal_umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e38338,
};
static const struct ath12k_hw_regs ipq5332_regs = {
@@ -1215,6 +1219,8 @@ static const struct ath12k_hw_regs wcn7850_regs = {
.hal_umac_ce0_dest_reg_base = 0x01b81000,
.hal_umac_ce1_src_reg_base = 0x01b82000,
.hal_umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e40304,
};
static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 0fbc17649df4..0a75bc5abfa2 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -375,6 +375,8 @@ struct ath12k_hw_regs {
u32 hal_reo_cmd_ring_base;
u32 hal_reo_status_ring_base;
+
+ u32 gcc_gcc_pcie_hot_rst;
};
static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 88b59f3ff87a..59ec422992d3 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -4360,7 +4360,7 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar,
{
struct ath12k_base *ab = ar->ab;
struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
- unsigned long timeout, time_left;
+ unsigned long time_left;
int ret;
guard(mutex)(&ah->hw_mutex);
@@ -4368,19 +4368,13 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar,
if (ah->state != ATH12K_HW_STATE_ON)
return -ENETDOWN;
- /* FW stats can get split when exceeding the stats data buffer limit.
- * In that case, since there is no end marking for the back-to-back
- * received 'update stats' event, we keep a 3 seconds timeout in case,
- * fw_stats_done is not marked yet
- */
- timeout = jiffies + msecs_to_jiffies(3 * 1000);
ath12k_fw_stats_reset(ar);
reinit_completion(&ar->fw_stats_complete);
+ reinit_completion(&ar->fw_stats_done);
ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id,
param->vdev_id, param->pdev_id);
-
if (ret) {
ath12k_warn(ab, "failed to request fw stats: %d\n", ret);
return ret;
@@ -4391,7 +4385,6 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar,
param->pdev_id, param->vdev_id, param->stats_id);
time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
-
if (!time_left) {
ath12k_warn(ab, "time out while waiting for get fw stats\n");
return -ETIMEDOUT;
@@ -4400,20 +4393,15 @@ int ath12k_mac_get_fw_stats(struct ath12k *ar,
/* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back
* when stats data buffer limit is reached. fw_stats_complete
* is completed once host receives first event from firmware, but
- * still end might not be marked in the TLV.
- * Below loop is to confirm that firmware completed sending all the event
- * and fw_stats_done is marked true when end is marked in the TLV.
+ * still there could be more events following. Below is to wait
+ * until firmware completes sending all the events.
*/
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
- spin_lock_bh(&ar->data_lock);
- if (ar->fw_stats.fw_stats_done) {
- spin_unlock_bh(&ar->data_lock);
- break;
- }
- spin_unlock_bh(&ar->data_lock);
+ time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ);
+ if (!time_left) {
+ ath12k_warn(ab, "time out while waiting for fw stats done\n");
+ return -ETIMEDOUT;
}
+
return 0;
}
@@ -5890,6 +5878,327 @@ exit:
return ret;
}
+static bool ath12k_mac_is_freq_on_mac(struct ath12k_hw_mode_freq_range_arg *freq_range,
+ u32 freq, u8 mac_id)
+{
+ return (freq >= freq_range[mac_id].low_2ghz_freq &&
+ freq <= freq_range[mac_id].high_2ghz_freq) ||
+ (freq >= freq_range[mac_id].low_5ghz_freq &&
+ freq <= freq_range[mac_id].high_5ghz_freq);
+}
+
+static bool
+ath12k_mac_2_freq_same_mac_in_freq_range(struct ath12k_base *ab,
+ struct ath12k_hw_mode_freq_range_arg *freq_range,
+ u32 freq_link1, u32 freq_link2)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_RADIOS; i++) {
+ if (ath12k_mac_is_freq_on_mac(freq_range, freq_link1, i) &&
+ ath12k_mac_is_freq_on_mac(freq_range, freq_link2, i))
+ return true;
+ }
+
+ return false;
+}
+
+static bool ath12k_mac_is_hw_dbs_capable(struct ath12k_base *ab)
+{
+ return test_bit(WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+ ab->wmi_ab.svc_map) &&
+ ab->wmi_ab.hw_mode_info.support_dbs;
+}
+
+static bool ath12k_mac_2_freq_same_mac_in_dbs(struct ath12k_base *ab,
+ u32 freq_link1, u32 freq_link2)
+{
+ struct ath12k_hw_mode_freq_range_arg *freq_range;
+
+ if (!ath12k_mac_is_hw_dbs_capable(ab))
+ return true;
+
+ freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[ATH12K_HW_MODE_DBS];
+ return ath12k_mac_2_freq_same_mac_in_freq_range(ab, freq_range,
+ freq_link1, freq_link2);
+}
+
+static bool ath12k_mac_is_hw_sbs_capable(struct ath12k_base *ab)
+{
+ return test_bit(WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+ ab->wmi_ab.svc_map) &&
+ ab->wmi_ab.hw_mode_info.support_sbs;
+}
+
+static bool ath12k_mac_2_freq_same_mac_in_sbs(struct ath12k_base *ab,
+ u32 freq_link1, u32 freq_link2)
+{
+ struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *sbs_uppr_share;
+ struct ath12k_hw_mode_freq_range_arg *sbs_low_share;
+ struct ath12k_hw_mode_freq_range_arg *sbs_range;
+
+ if (!ath12k_mac_is_hw_sbs_capable(ab))
+ return true;
+
+ if (ab->wmi_ab.sbs_lower_band_end_freq) {
+ sbs_uppr_share = info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];
+ sbs_low_share = info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];
+
+ return ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_low_share,
+ freq_link1, freq_link2) ||
+ ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_uppr_share,
+ freq_link1, freq_link2);
+ }
+
+ sbs_range = info->freq_range_caps[ATH12K_HW_MODE_SBS];
+ return ath12k_mac_2_freq_same_mac_in_freq_range(ab, sbs_range,
+ freq_link1, freq_link2);
+}
+
+static bool ath12k_mac_freqs_on_same_mac(struct ath12k_base *ab,
+ u32 freq_link1, u32 freq_link2)
+{
+ return ath12k_mac_2_freq_same_mac_in_dbs(ab, freq_link1, freq_link2) &&
+ ath12k_mac_2_freq_same_mac_in_sbs(ab, freq_link1, freq_link2);
+}
+
+static int ath12k_mac_mlo_sta_set_link_active(struct ath12k_base *ab,
+ enum wmi_mlo_link_force_reason reason,
+ enum wmi_mlo_link_force_mode mode,
+ u8 *mlo_vdev_id_lst,
+ u8 num_mlo_vdev,
+ u8 *mlo_inactive_vdev_lst,
+ u8 num_mlo_inactive_vdev)
+{
+ struct wmi_mlo_link_set_active_arg param = {0};
+ u32 entry_idx, entry_offset, vdev_idx;
+ u8 vdev_id;
+
+ param.reason = reason;
+ param.force_mode = mode;
+
+ for (vdev_idx = 0; vdev_idx < num_mlo_vdev; vdev_idx++) {
+ vdev_id = mlo_vdev_id_lst[vdev_idx];
+ entry_idx = vdev_id / 32;
+ entry_offset = vdev_id % 32;
+ if (entry_idx >= WMI_MLO_LINK_NUM_SZ) {
+ ath12k_warn(ab, "Invalid entry_idx %d num_mlo_vdev %d vdev %d",
+ entry_idx, num_mlo_vdev, vdev_id);
+ return -EINVAL;
+ }
+ param.vdev_bitmap[entry_idx] |= 1 << entry_offset;
+ /* update entry number if entry index changed */
+ if (param.num_vdev_bitmap < entry_idx + 1)
+ param.num_vdev_bitmap = entry_idx + 1;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "num_vdev_bitmap %d vdev_bitmap[0] = 0x%x, vdev_bitmap[1] = 0x%x",
+ param.num_vdev_bitmap, param.vdev_bitmap[0], param.vdev_bitmap[1]);
+
+ if (mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
+ for (vdev_idx = 0; vdev_idx < num_mlo_inactive_vdev; vdev_idx++) {
+ vdev_id = mlo_inactive_vdev_lst[vdev_idx];
+ entry_idx = vdev_id / 32;
+ entry_offset = vdev_id % 32;
+ if (entry_idx >= WMI_MLO_LINK_NUM_SZ) {
+ ath12k_warn(ab, "Invalid entry_idx %d num_mlo_vdev %d vdev %d",
+ entry_idx, num_mlo_inactive_vdev, vdev_id);
+ return -EINVAL;
+ }
+ param.inactive_vdev_bitmap[entry_idx] |= 1 << entry_offset;
+ /* update entry number if entry index changed */
+ if (param.num_inactive_vdev_bitmap < entry_idx + 1)
+ param.num_inactive_vdev_bitmap = entry_idx + 1;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "num_vdev_bitmap %d inactive_vdev_bitmap[0] = 0x%x, inactive_vdev_bitmap[1] = 0x%x",
+ param.num_inactive_vdev_bitmap,
+ param.inactive_vdev_bitmap[0],
+ param.inactive_vdev_bitmap[1]);
+ }
+
+ if (mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM ||
+ mode == WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM) {
+ param.num_link_entry = 1;
+ param.link_num[0].num_of_link = num_mlo_vdev - 1;
+ }
+
+ return ath12k_wmi_send_mlo_link_set_active_cmd(ab, &param);
+}
+
+static int ath12k_mac_mlo_sta_update_link_active(struct ath12k_base *ab,
+ struct ieee80211_hw *hw,
+ struct ath12k_vif *ahvif)
+{
+ u8 mlo_vdev_id_lst[IEEE80211_MLD_MAX_NUM_LINKS] = {0};
+ u32 mlo_freq_list[IEEE80211_MLD_MAX_NUM_LINKS] = {0};
+ unsigned long links = ahvif->links_map;
+ enum wmi_mlo_link_force_reason reason;
+ struct ieee80211_chanctx_conf *conf;
+ enum wmi_mlo_link_force_mode mode;
+ struct ieee80211_bss_conf *info;
+ struct ath12k_link_vif *arvif;
+ u8 num_mlo_vdev = 0;
+ u8 link_id;
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ /* make sure vdev is created on this device */
+ if (!arvif || !arvif->is_created || arvif->ar->ab != ab)
+ continue;
+
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ conf = wiphy_dereference(hw->wiphy, info->chanctx_conf);
+ mlo_freq_list[num_mlo_vdev] = conf->def.chan->center_freq;
+
+ mlo_vdev_id_lst[num_mlo_vdev] = arvif->vdev_id;
+ num_mlo_vdev++;
+ }
+
+ /* It is not allowed to activate more links than a single device
+ * supports. Something went wrong if we reach here.
+ */
+ if (num_mlo_vdev > ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ /* if 2 links are established and both link channels fall on the
+ * same hardware MAC, send command to firmware to deactivate one
+ * of them.
+ */
+ if (num_mlo_vdev == 2 &&
+ ath12k_mac_freqs_on_same_mac(ab, mlo_freq_list[0],
+ mlo_freq_list[1])) {
+ mode = WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM;
+ reason = WMI_MLO_LINK_FORCE_REASON_NEW_CONNECT;
+ return ath12k_mac_mlo_sta_set_link_active(ab, reason, mode,
+ mlo_vdev_id_lst, num_mlo_vdev,
+ NULL, 0);
+ }
+
+ return 0;
+}
+
+static bool ath12k_mac_are_sbs_chan(struct ath12k_base *ab, u32 freq_1, u32 freq_2)
+{
+ if (!ath12k_mac_is_hw_sbs_capable(ab))
+ return false;
+
+ if (ath12k_is_2ghz_channel_freq(freq_1) ||
+ ath12k_is_2ghz_channel_freq(freq_2))
+ return false;
+
+ return !ath12k_mac_2_freq_same_mac_in_sbs(ab, freq_1, freq_2);
+}
+
+static bool ath12k_mac_are_dbs_chan(struct ath12k_base *ab, u32 freq_1, u32 freq_2)
+{
+ if (!ath12k_mac_is_hw_dbs_capable(ab))
+ return false;
+
+ return !ath12k_mac_2_freq_same_mac_in_dbs(ab, freq_1, freq_2);
+}
+
+static int ath12k_mac_select_links(struct ath12k_base *ab,
+ struct ieee80211_vif *vif,
+ struct ieee80211_hw *hw,
+ u16 *selected_links)
+{
+ unsigned long useful_links = ieee80211_vif_usable_links(vif);
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ u8 num_useful_links = hweight_long(useful_links);
+ struct ieee80211_chanctx_conf *chanctx;
+ struct ath12k_link_vif *assoc_arvif;
+ u32 assoc_link_freq, partner_freq;
+ u16 sbs_links = 0, dbs_links = 0;
+ struct ieee80211_bss_conf *info;
+ struct ieee80211_channel *chan;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ u8 link_id;
+
+ /* activate all useful links if less than max supported */
+ if (num_useful_links <= ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE) {
+ *selected_links = useful_links;
+ return 0;
+ }
+
+ /* we can only get here in station mode, so it's safe
+ * to use ap_addr
+ */
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
+ if (!sta) {
+ rcu_read_unlock();
+ ath12k_warn(ab, "failed to find sta with addr %pM\n", vif->cfg.ap_addr);
+ return -EINVAL;
+ }
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+ assoc_arvif = wiphy_dereference(hw->wiphy, ahvif->link[ahsta->assoc_link_id]);
+ info = ath12k_mac_get_link_bss_conf(assoc_arvif);
+ chanctx = rcu_dereference(info->chanctx_conf);
+ assoc_link_freq = chanctx->def.chan->center_freq;
+ rcu_read_unlock();
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "assoc link %u freq %u\n",
+ assoc_arvif->link_id, assoc_link_freq);
+
+ /* assoc link is already activated and has to be kept active,
+ * only need to select a partner link from others.
+ */
+ useful_links &= ~BIT(assoc_arvif->link_id);
+ for_each_set_bit(link_id, &useful_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ info = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
+ if (!info) {
+ ath12k_warn(ab, "failed to get link info for link: %u\n",
+ link_id);
+ return -ENOLINK;
+ }
+
+ chan = info->chanreq.oper.chan;
+ if (!chan) {
+ ath12k_warn(ab, "failed to get chan for link: %u\n", link_id);
+ return -EINVAL;
+ }
+
+ partner_freq = chan->center_freq;
+ if (ath12k_mac_are_sbs_chan(ab, assoc_link_freq, partner_freq)) {
+ sbs_links |= BIT(link_id);
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "new SBS link %u freq %u\n",
+ link_id, partner_freq);
+ continue;
+ }
+
+ if (ath12k_mac_are_dbs_chan(ab, assoc_link_freq, partner_freq)) {
+ dbs_links |= BIT(link_id);
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "new DBS link %u freq %u\n",
+ link_id, partner_freq);
+ continue;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "non DBS/SBS link %u freq %u\n",
+ link_id, partner_freq);
+ }
+
+ /* choose the first candidate no matter how many are in the list */
+ if (sbs_links)
+ link_id = __ffs(sbs_links);
+ else if (dbs_links)
+ link_id = __ffs(dbs_links);
+ else
+ link_id = ffs(useful_links) - 1;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "select partner link %u\n", link_id);
+
+ *selected_links = BIT(assoc_arvif->link_id) | BIT(link_id);
+
+ return 0;
+}
+
static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -5899,10 +6208,13 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_base *prev_ab = NULL, *ab;
struct ath12k_link_vif *arvif;
struct ath12k_link_sta *arsta;
unsigned long valid_links;
- u8 link_id = 0;
+ u16 selected_links = 0;
+ u8 link_id = 0, i;
+ struct ath12k *ar;
int ret;
lockdep_assert_wiphy(hw->wiphy);
@@ -5972,8 +6284,24 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
* about to move to the associated state.
*/
if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION &&
- old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC)
- ieee80211_set_active_links(vif, ieee80211_vif_usable_links(vif));
+ old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) {
+ /* TODO: for now only do link selection for single device
+ * MLO case. Other cases will be handled in the future.
+ */
+ ab = ah->radio[0].ab;
+ if (ab->ag->num_devices == 1) {
+ ret = ath12k_mac_select_links(ab, vif, hw, &selected_links);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to get selected links: %d\n", ret);
+ goto exit;
+ }
+ } else {
+ selected_links = ieee80211_vif_usable_links(vif);
+ }
+
+ ieee80211_set_active_links(vif, selected_links);
+ }
/* Handle all the other state transitions in generic way */
valid_links = ahsta->links_map;
@@ -5997,6 +6325,24 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
}
}
+ if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION &&
+ old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) {
+ for_each_ar(ah, ar, i) {
+ ab = ar->ab;
+ if (prev_ab == ab)
+ continue;
+
+ ret = ath12k_mac_mlo_sta_update_link_active(ab, hw, ahvif);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to update link active state on connect %d\n",
+ ret);
+ goto exit;
+ }
+
+ prev_ab = ab;
+ }
+ }
/* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST:
* Remove the station from driver (handle ML sta here since that
* needs special handling. Normal sta will be handled in generic
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index e6e74b45bfa4..cc81b1f5680f 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -54,6 +54,8 @@ struct ath12k_generic_iter {
#define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS
#define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1)
+#define ATH12K_NUM_MAX_ACTIVE_LINKS_PER_DEVICE 2
+
enum ath12k_supported_bw {
ATH12K_BW_20 = 0,
ATH12K_BW_40 = 1,
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 489d546390fc..1f3cfd9b89fd 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -292,10 +292,10 @@ static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);
- val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));
val |= GCC_GCC_PCIE_HOT_RST_VAL;
- ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
- val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST(ab), val);
+ val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index 0b4c459d6d8e..d1ec8aad7f6c 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -28,7 +28,9 @@
#define PCIE_PCIE_PARF_LTSSM 0x1e081b0
#define PARM_LTSSM_VALUE 0x111
-#define GCC_GCC_PCIE_HOT_RST 0x1e38338
+#define GCC_GCC_PCIE_HOT_RST(ab) \
+ ((ab)->hw_params->regs->gcc_gcc_pcie_hot_rst)
+
#define GCC_GCC_PCIE_HOT_RST_VAL 0x10
#define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 60e2444fe08c..465f877fc0fb 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -91,6 +91,11 @@ struct ath12k_wmi_svc_rdy_ext2_parse {
bool dma_ring_cap_done;
bool spectral_bin_scaling_done;
bool mac_phy_caps_ext_done;
+ bool hal_reg_caps_ext2_done;
+ bool scan_radio_caps_ext2_done;
+ bool twt_caps_done;
+ bool htt_msdu_idx_to_qtype_map_done;
+ bool dbs_or_sbs_cap_ext_done;
};
struct ath12k_wmi_rdy_parse {
@@ -4395,6 +4400,7 @@ static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
u16 len, const void *ptr, void *data)
{
+ struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info;
struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
enum wmi_host_hw_mode_config_type mode, pref;
@@ -4427,8 +4433,11 @@ static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
}
}
- ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
- soc->wmi_ab.preferred_hw_mode);
+ svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps;
+
+ ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n",
+ svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode);
+
if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
return -EINVAL;
@@ -4658,6 +4667,65 @@ free_dir_buff:
return ret;
}
+static void
+ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab,
+ const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap,
+ struct ath12k_svc_ext_mac_phy_info *mac_phy_info)
+{
+ mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id);
+ mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands);
+ mac_phy_info->hw_freq_range.low_2ghz_freq =
+ __le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq);
+ mac_phy_info->hw_freq_range.high_2ghz_freq =
+ __le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq);
+ mac_phy_info->hw_freq_range.low_5ghz_freq =
+ __le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq);
+ mac_phy_info->hw_freq_range.high_5ghz_freq =
+ __le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq);
+}
+
+static void
+ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab,
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
+{
+ struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
+ const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap;
+ const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
+ struct ath12k_svc_ext_mac_phy_info *mac_phy_info;
+ u32 hw_mode_id, phy_bit_map;
+ u8 hw_idx;
+
+ mac_phy_info = &svc_ext_info->mac_phy_info[0];
+ mac_phy_cap = svc_rdy_ext->mac_phy_caps;
+
+ for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) {
+ hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx];
+ hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id);
+ phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map);
+
+ while (phy_bit_map) {
+ ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info);
+ mac_phy_info->hw_mode_config_type =
+ le32_get_bits(hw_mode_cap->hw_mode_config_type,
+ WMI_HW_MODE_CAP_CFG_TYPE);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n",
+ hw_idx, hw_mode_id,
+ mac_phy_info->hw_mode_config_type,
+ mac_phy_info->supported_bands, mac_phy_info->phy_id,
+ mac_phy_info->hw_freq_range.low_2ghz_freq,
+ mac_phy_info->hw_freq_range.high_2ghz_freq,
+ mac_phy_info->hw_freq_range.low_5ghz_freq,
+ mac_phy_info->hw_freq_range.high_5ghz_freq);
+
+ mac_phy_cap++;
+ mac_phy_info++;
+
+ phy_bit_map >>= 1;
+ }
+ }
+}
+
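In ath12k_wmi_save_all_mac_phy_info() above, each set bit of a hardware mode's phy_id_map consumes the next entry from the flat MAC/PHY capability array, so modes with more PHYs take more entries. A standalone sketch of that bookkeeping; the example maps are illustrative, not firmware data.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* one bit per PHY used by each hardware mode */
	const uint32_t phy_id_map[] = { 0x1, 0x3 };	/* e.g. SMM: 1 PHY, DBS: 2 PHYs */
	const unsigned int num_hw_modes = 2;
	unsigned int hw_idx, cap_idx = 0;

	for (hw_idx = 0; hw_idx < num_hw_modes; hw_idx++) {
		uint32_t map = phy_id_map[hw_idx];

		while (map) {
			printf("hw mode %u -> mac_phy capability entry %u\n",
			       hw_idx, cap_idx++);
			map >>= 1;
		}
	}
	return 0;
}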
static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
@@ -4706,6 +4774,8 @@ static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
return ret;
}
+ ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext);
+
svc_rdy_ext->mac_phy_done = true;
} else if (!svc_rdy_ext->ext_hal_reg_done) {
ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
@@ -4922,10 +4992,449 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
return 0;
}
+static void
+ath12k_wmi_update_freq_info(struct ath12k_base *ab,
+ struct ath12k_svc_ext_mac_phy_info *mac_cap,
+ enum ath12k_hw_mode mode,
+ u32 phy_id)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *mac_range;
+
+ mac_range = &hw_mode_info->freq_range_caps[mode][phy_id];
+
+ if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
+ mac_range->low_2ghz_freq = max_t(u32,
+ mac_cap->hw_freq_range.low_2ghz_freq,
+ ATH12K_MIN_2GHZ_FREQ);
+ mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ?
+ min_t(u32,
+ mac_cap->hw_freq_range.high_2ghz_freq,
+ ATH12K_MAX_2GHZ_FREQ) :
+ ATH12K_MAX_2GHZ_FREQ;
+ }
+
+ if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
+ mac_range->low_5ghz_freq = max_t(u32,
+ mac_cap->hw_freq_range.low_5ghz_freq,
+ ATH12K_MIN_5GHZ_FREQ);
+ mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ?
+ min_t(u32,
+ mac_cap->hw_freq_range.high_5ghz_freq,
+ ATH12K_MAX_6GHZ_FREQ) :
+ ATH12K_MAX_6GHZ_FREQ;
+ }
+}
+
+static bool
+ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab,
+ enum ath12k_hw_mode hwmode)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *mac_range;
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id];
+ /* modify SBS/DBS range only when both PHYs for DBS are filled */
+ if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq)
+ return false;
+ }
+
+ return true;
+}
+
+static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *mac_range;
+ u8 phy_id;
+
+ mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS];
+ /* Reset 5 GHz range for shared mac for DBS */
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ if (mac_range[phy_id].low_2ghz_freq &&
+ mac_range[phy_id].low_5ghz_freq) {
+ mac_range[phy_id].low_5ghz_freq = 0;
+ mac_range[phy_id].high_5ghz_freq = 0;
+ }
+ }
+}
+
+static u32
+ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
+{
+ u32 highest_freq = 0;
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ if (range[phy_id].high_5ghz_freq > highest_freq)
+ highest_freq = range[phy_id].high_5ghz_freq;
+ }
+
+ return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ;
+}
+
+static u32
+ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
+{
+ u32 lowest_freq = 0;
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ if ((!lowest_freq && range[phy_id].low_5ghz_freq) ||
+ range[phy_id].low_5ghz_freq < lowest_freq)
+ lowest_freq = range[phy_id].low_5ghz_freq;
+ }
+
+ return lowest_freq ? lowest_freq : ATH12K_MIN_5GHZ_FREQ;
+}
+
+static void
+ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab,
+ u16 sbs_range_sep,
+ struct ath12k_hw_mode_freq_range_arg *ref_freq)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range;
+ u8 phy_id;
+
+ upper_sbs_freq_range =
+ hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];
+
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ upper_sbs_freq_range[phy_id].low_2ghz_freq =
+ ref_freq[phy_id].low_2ghz_freq;
+ upper_sbs_freq_range[phy_id].high_2ghz_freq =
+ ref_freq[phy_id].high_2ghz_freq;
+
+ /* update for shared mac */
+ if (upper_sbs_freq_range[phy_id].low_2ghz_freq) {
+ upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
+ upper_sbs_freq_range[phy_id].high_5ghz_freq =
+ ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
+ } else {
+ upper_sbs_freq_range[phy_id].low_5ghz_freq =
+ ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
+ upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
+ }
+ }
+}
+
+static void
+ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab,
+ u16 sbs_range_sep,
+ struct ath12k_hw_mode_freq_range_arg *ref_freq)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range;
+ u8 phy_id;
+
+ lower_sbs_freq_range =
+ hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];
+
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ lower_sbs_freq_range[phy_id].low_2ghz_freq =
+ ref_freq[phy_id].low_2ghz_freq;
+ lower_sbs_freq_range[phy_id].high_2ghz_freq =
+ ref_freq[phy_id].high_2ghz_freq;
+
+ /* update for shared mac */
+ if (lower_sbs_freq_range[phy_id].low_2ghz_freq) {
+ lower_sbs_freq_range[phy_id].low_5ghz_freq =
+ ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
+ lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
+ } else {
+ lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
+ lower_sbs_freq_range[phy_id].high_5ghz_freq =
+ ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
+ }
+ }
+}
+
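A worked standalone example of how the sbs_lower_band_end_freq cutoff used by the two fill helpers above splits the 5/6 GHz span between the shared (2.4 GHz + 5 GHz) MAC and the dedicated 5 GHz MAC. The 10 MHz step mirrors the helpers; the band edges and cutoff are illustrative numbers.

#include <stdio.h>

int main(void)
{
	unsigned int low_5ghz = 5180, high_5ghz = 7115;	/* illustrative span */
	unsigned int cutoff = 5900;			/* sbs_lower_band_end_freq */

	/* lower share: the MAC that also owns 2.4 GHz takes the low 5 GHz part */
	printf("lower share: shared MAC [%u-%u] MHz, other MAC [%u-%u] MHz\n",
	       low_5ghz, cutoff, cutoff + 10, high_5ghz);

	/* upper share: the MAC that also owns 2.4 GHz takes the high 5 GHz part */
	printf("upper share: shared MAC [%u-%u] MHz, other MAC [%u-%u] MHz\n",
	       cutoff + 10, high_5ghz, low_5ghz, cutoff);
	return 0;
}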
+static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode)
+{
+ static const char * const mode_str[] = {
+ [ATH12K_HW_MODE_SMM] = "SMM",
+ [ATH12K_HW_MODE_DBS] = "DBS",
+ [ATH12K_HW_MODE_SBS] = "SBS",
+ [ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE",
+ [ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE",
+ };
+
+ if (hw_mode >= ARRAY_SIZE(mode_str))
+ return "Unknown";
+
+ return mode_str[hw_mode];
+}
+
+static void
+ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab,
+ struct ath12k_hw_mode_freq_range_arg *freq_range,
+ enum ath12k_hw_mode hw_mode)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_RADIOS; i++)
+ if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq)
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
+ ath12k_wmi_hw_mode_to_str(hw_mode),
+ hw_mode, i,
+ freq_range[i].low_2ghz_freq,
+ freq_range[i].high_2ghz_freq,
+ freq_range[i].low_5ghz_freq,
+ freq_range[i].high_5ghz_freq);
+}
+
+static void ath12k_wmi_dump_freq_range(struct ath12k_base *ab)
+{
+ struct ath12k_hw_mode_freq_range_arg *freq_range;
+ u8 i;
+
+ for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) {
+ freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i];
+ ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i);
+ }
+}
+
+static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range;
+ struct ath12k_hw_mode_freq_range_arg *non_shared_range;
+ u8 shared_phy_id;
+
+ sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id];
+
+ /* if the SBS mac range has both 2.4 and 5 GHz ranges, i.e. a shared
+ * phy_id, keep the range as it is in SBS
+ */
+ if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq)
+ return 0;
+
+ if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) {
+ ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4Ghz");
+ ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS);
+ return -EINVAL;
+ }
+
+ non_shared_range = sbs_mac_range;
+ /* if SBS mac range has only 5 GHz then it's the non-shared phy, so
+ * modify the range as per the shared mac.
+ */
+ shared_phy_id = phy_id ? 0 : 1;
+ shared_mac_range =
+ &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id];
+
+ if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared");
+ /* If the shared mac lower 5 GHz frequency is greater than
+ * non-shared mac lower 5 GHz frequency then the shared mac has
+ * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high
+ * freq should be less than the shared mac's low 5 GHz freq.
+ */
+ if (non_shared_range->high_5ghz_freq >=
+ shared_mac_range->low_5ghz_freq)
+ non_shared_range->high_5ghz_freq =
+ max_t(u32, shared_mac_range->low_5ghz_freq - 10,
+ non_shared_range->low_5ghz_freq);
+ } else if (shared_mac_range->high_5ghz_freq <
+ non_shared_range->high_5ghz_freq) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared");
+ /* If the shared mac high 5 GHz frequency is less than
+ * non-shared mac high 5 GHz frequency then the shared mac has
+ * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low
+ * freq should be greater than the shared mac's high 5 GHz freq.
+ */
+ if (shared_mac_range->high_5ghz_freq >=
+ non_shared_range->low_5ghz_freq)
+ non_shared_range->low_5ghz_freq =
+ min_t(u32, shared_mac_range->high_5ghz_freq + 10,
+ non_shared_range->high_5ghz_freq);
+ } else {
+ ath12k_warn(ab, "invalid SBS range with all 5 GHz shared");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab)
+{
+ struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
+ struct ath12k_hw_mode_freq_range_arg *mac_range;
+ u16 sbs_range_sep;
+ u8 phy_id;
+ int ret;
+
+ mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS];
+
+ /* If sbs_lower_band_end_freq has a value, then the frequency range
+ * will be split using that value.
+ */
+ sbs_range_sep = ab->wmi_ab.sbs_lower_band_end_freq;
+ if (sbs_range_sep) {
+ ath12k_wmi_fill_upper_share_sbs_freq(ab, sbs_range_sep,
+ mac_range);
+ ath12k_wmi_fill_lower_share_sbs_freq(ab, sbs_range_sep,
+ mac_range);
+ /* Hardware specifies the range boundary with sbs_range_sep
+ * (i.e. the boundary between 5 GHz high and 5 GHz low);
+ * reset the original one to make sure it will not get used.
+ */
+ memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
+ return;
+ }
+
+ /* If sbs_lower_band_end_freq is not set, the firmware will send one
+ * shared mac range and one non-shared mac range, so update that freq.
+ */
+ for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
+ ret = ath12k_wmi_modify_sbs_freq(ab, phy_id);
+ if (ret) {
+ memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
+ break;
+ }
+ }
+}
+
+static void
+ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab,
+ enum wmi_host_hw_mode_config_type hw_config_type,
+ u32 phy_id,
+ struct ath12k_svc_ext_mac_phy_info *mac_cap)
+{
+ if (phy_id >= MAX_RADIOS) {
+ ath12k_err(ab, "mac more than two not supported: %d", phy_id);
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
+ hw_config_type, phy_id, mac_cap->supported_bands,
+ ab->wmi_ab.sbs_lower_band_end_freq,
+ mac_cap->hw_freq_range.low_2ghz_freq,
+ mac_cap->hw_freq_range.high_2ghz_freq,
+ mac_cap->hw_freq_range.low_5ghz_freq,
+ mac_cap->hw_freq_range.high_5ghz_freq);
+
+ switch (hw_config_type) {
+ case WMI_HOST_HW_MODE_SINGLE:
+ if (phy_id) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported");
+ break;
+ }
+ ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id);
+ break;
+
+ case WMI_HOST_HW_MODE_DBS:
+ if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
+ ath12k_wmi_update_freq_info(ab, mac_cap,
+ ATH12K_HW_MODE_DBS, phy_id);
+ break;
+ case WMI_HOST_HW_MODE_DBS_SBS:
+ case WMI_HOST_HW_MODE_DBS_OR_SBS:
+ ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id);
+ if (ab->wmi_ab.sbs_lower_band_end_freq ||
+ mac_cap->hw_freq_range.low_5ghz_freq ||
+ mac_cap->hw_freq_range.low_2ghz_freq)
+ ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS,
+ phy_id);
+
+ if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
+ ath12k_wmi_update_dbs_freq_info(ab);
+ if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
+ ath12k_wmi_update_sbs_freq_info(ab);
+ break;
+ case WMI_HOST_HW_MODE_SBS:
+ case WMI_HOST_HW_MODE_SBS_PASSIVE:
+ ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id);
+ if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
+ ath12k_wmi_update_sbs_freq_info(ab);
+
+ break;
+ default:
+ break;
+ }
+}
+
+static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab)
+{
+ if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS) ||
+ (ab->wmi_ab.sbs_lower_band_end_freq &&
+ ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) &&
+ ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE)))
+ return true;
+
+ return false;
+}
+
+static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab)
+{
+ struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
+ struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info;
+ enum wmi_host_hw_mode_config_type hw_config_type;
+ struct ath12k_svc_ext_mac_phy_info *tmp;
+ bool dbs_mode = false, sbs_mode = false;
+ u32 i, j = 0;
+
+ if (!svc_ext_info->num_hw_modes) {
+ ath12k_err(ab, "invalid number of hw modes");
+ return -EINVAL;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d",
+ svc_ext_info->num_hw_modes);
+
+ memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps));
+
+ for (i = 0; i < svc_ext_info->num_hw_modes; i++) {
+ if (j >= ATH12K_MAX_MAC_PHY_CAP)
+ return -EINVAL;
+
+ /* Update for MAC0 */
+ tmp = &svc_ext_info->mac_phy_info[j++];
+ hw_config_type = tmp->hw_mode_config_type;
+ ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp);
+
+ /* SBS and DBS have dual MAC. Up to 2 MACs are considered. */
+ if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
+ hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
+ hw_config_type == WMI_HOST_HW_MODE_SBS ||
+ hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) {
+ if (j >= ATH12K_MAX_MAC_PHY_CAP)
+ return -EINVAL;
+ /* Update for MAC1 */
+ tmp = &svc_ext_info->mac_phy_info[j++];
+ ath12k_wmi_update_mac_freq_info(ab, hw_config_type,
+ tmp->phy_id, tmp);
+
+ if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
+ hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)
+ dbs_mode = true;
+
+ if (ath12k_wmi_sbs_range_present(ab) &&
+ (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
+ hw_config_type == WMI_HOST_HW_MODE_SBS ||
+ hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS))
+ sbs_mode = true;
+ }
+ }
+
+ info->support_dbs = dbs_mode;
+ info->support_sbs = sbs_mode;
+
+ ath12k_wmi_dump_freq_range(ab);
+
+ return 0;
+}
+
static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
{
+ const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps;
struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
int ret;
@@ -4967,7 +5476,32 @@ static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
}
parse->mac_phy_caps_ext_done = true;
+ } else if (!parse->hal_reg_caps_ext2_done) {
+ parse->hal_reg_caps_ext2_done = true;
+ } else if (!parse->scan_radio_caps_ext2_done) {
+ parse->scan_radio_caps_ext2_done = true;
+ } else if (!parse->twt_caps_done) {
+ parse->twt_caps_done = true;
+ } else if (!parse->htt_msdu_idx_to_qtype_map_done) {
+ parse->htt_msdu_idx_to_qtype_map_done = true;
+ } else if (!parse->dbs_or_sbs_cap_ext_done) {
+ dbs_or_sbs_caps = ptr;
+ ab->wmi_ab.sbs_lower_band_end_freq =
+ __le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n",
+ ab->wmi_ab.sbs_lower_band_end_freq);
+
+ ret = ath12k_wmi_update_hw_mode_list(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to update hw mode list: %d\n",
+ ret);
+ return ret;
+ }
+
+ parse->dbs_or_sbs_cap_ext_done = true;
}
+
break;
default:
break;
@@ -7626,6 +8160,64 @@ static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
&parse);
}
+static void ath12k_wmi_fw_stats_process(struct ath12k *ar,
+ struct ath12k_fw_stats *stats)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev;
+ bool is_end = true;
+ size_t total_vdevs_started = 0;
+ int i;
+
+ if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats->vdevs)) {
+ ath12k_warn(ab, "empty vdev stats");
+ return;
+ }
+ /* FW sends all the active VDEV stats irrespective of PDEV,
+ * hence limit until the count of all started VDEVs is reached
+ */
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += pdev->ar->num_started_vdevs;
+ }
+ rcu_read_unlock();
+
+ if (total_vdevs_started)
+ is_end = ((++ar->fw_stats.num_vdev_recvd) ==
+ total_vdevs_started);
+
+ list_splice_tail_init(&stats->vdevs,
+ &ar->fw_stats.vdevs);
+
+ if (is_end)
+ complete(&ar->fw_stats_done);
+
+ return;
+ }
+
+ if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats->bcn)) {
+ ath12k_warn(ab, "empty beacon stats");
+ return;
+ }
+ /* Mark the end once we have received stats for all started VDEVs
+ * within the PDEV
+ */
+ if (ar->num_started_vdevs)
+ is_end = ((++ar->fw_stats.num_bcn_recvd) ==
+ ar->num_started_vdevs);
+
+ list_splice_tail_init(&stats->bcn,
+ &ar->fw_stats.bcn);
+
+ if (is_end)
+ complete(&ar->fw_stats_done);
+ }
+}
+
static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct ath12k_fw_stats stats = {};
@@ -7655,19 +8247,15 @@ static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *sk
spin_lock_bh(&ar->data_lock);
- /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
- * debugfs fw stats. Therefore, processing it separately.
- */
+ /* Handle WMI_REQUEST_PDEV_STAT status update */
if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
- ar->fw_stats.fw_stats_done = true;
+ complete(&ar->fw_stats_done);
goto complete;
}
- /* WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT are currently requested only
- * via debugfs fw stats. Hence, processing these in debugfs context.
- */
- ath12k_debugfs_fw_stats_process(ar, &stats);
+ /* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
+ ath12k_wmi_fw_stats_process(ar, &stats);
complete:
complete(&ar->fw_stats_complete);
@@ -9911,3 +10499,224 @@ int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
return 0;
}
+
+static int
+ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
+ struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
+ struct wmi_mlo_link_set_active_arg *arg)
+{
+ struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg;
+ u8 i;
+
+ if (arg->num_disallow_mode_comb >
+ ARRAY_SIZE(arg->disallow_bmap)) {
+ ath12k_warn(ab, "invalid num_disallow_mode_comb: %d",
+ arg->num_disallow_mode_comb);
+ return -EINVAL;
+ }
+
+ dislw_bmap_arg = &arg->disallow_bmap[0];
+ for (i = 0; i < arg->num_disallow_mode_comb; i++) {
+ dislw_bmap->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
+ dislw_bmap->disallowed_mode_bitmap =
+ cpu_to_le32(dislw_bmap_arg->disallowed_mode);
+ dislw_bmap->ieee_link_id_comb =
+ le32_encode_bits(dislw_bmap_arg->ieee_link_id[0],
+ WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
+ le32_encode_bits(dislw_bmap_arg->ieee_link_id[1],
+ WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
+ le32_encode_bits(dislw_bmap_arg->ieee_link_id[2],
+ WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
+ le32_encode_bits(dislw_bmap_arg->ieee_link_id[3],
+ WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
+ i, dislw_bmap_arg->disallowed_mode,
+ dislw_bmap_arg->ieee_link_id_comb);
+ dislw_bmap++;
+ dislw_bmap_arg++;
+ }
+
+ return 0;
+}
+
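ath12k_wmi_fill_disallowed_bmap() above packs four 8-bit IEEE link IDs into one 32-bit ieee_link_id_comb word using the GENMASK(7,0)/(15,8)/(23,16)/(31,24) fields defined in wmi.h further below. A standalone sketch of that packing; the link ID values are arbitrary examples.

#include <stdint.h>
#include <stdio.h>

/* pack id[0]..id[3] into bits [7:0], [15:8], [23:16] and [31:24] */
static uint32_t pack_link_id_comb(const uint8_t id[4])
{
	return (uint32_t)id[0] |
	       ((uint32_t)id[1] << 8) |
	       ((uint32_t)id[2] << 16) |
	       ((uint32_t)id[3] << 24);
}

int main(void)
{
	const uint8_t ids[4] = { 1, 2, 5, 15 };

	printf("ieee_link_id_comb 0x%08x\n", pack_link_id_comb(ids));	/* 0x0f050201 */
	return 0;
}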
+int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
+ struct wmi_mlo_link_set_active_arg *arg)
+{
+ struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
+ struct wmi_mlo_set_active_link_number_params *link_num_param;
+ u32 num_link_num_param = 0, num_vdev_bitmap = 0;
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_mlo_link_set_active_cmd *cmd;
+ u32 num_inactive_vdev_bitmap = 0;
+ u32 num_disallow_mode_comb = 0;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ __le32 *vdev_bitmap;
+ void *buf_ptr;
+ int i, ret;
+ u32 len;
+
+ if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
+ ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
+ return -EINVAL;
+ }
+
+ switch (arg->force_mode) {
+ case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
+ case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
+ num_link_num_param = arg->num_link_entry;
+ fallthrough;
+ case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
+ case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
+ case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
+ num_vdev_bitmap = arg->num_vdev_bitmap;
+ break;
+ case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
+ num_vdev_bitmap = arg->num_vdev_bitmap;
+ num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
+ break;
+ default:
+ ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode);
+ return -EINVAL;
+ }
+
+ num_disallow_mode_comb = arg->num_disallow_mode_comb;
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param +
+ TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap +
+ TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +
+ TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb;
+ if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE)
+ len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD,
+ sizeof(*cmd));
+ cmd->force_mode = cpu_to_le32(arg->force_mode);
+ cmd->reason = cpu_to_le32(arg->reason);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d",
+ arg->force_mode, arg->reason, num_link_num_param,
+ num_vdev_bitmap, num_inactive_vdev_bitmap,
+ num_disallow_mode_comb);
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*link_num_param) * num_link_num_param);
+ buf_ptr += TLV_HDR_SIZE;
+
+ if (num_link_num_param) {
+ cmd->ctrl_flags =
+ le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0,
+ CRTL_F_DYNC_FORCE_LINK_NUM);
+
+ link_num_param = buf_ptr;
+ for (i = 0; i < num_link_num_param; i++) {
+ link_num_param->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param));
+ link_num_param->num_of_link =
+ cpu_to_le32(arg->link_num[i].num_of_link);
+ link_num_param->vdev_type =
+ cpu_to_le32(arg->link_num[i].vdev_type);
+ link_num_param->vdev_subtype =
+ cpu_to_le32(arg->link_num[i].vdev_subtype);
+ link_num_param->home_freq =
+ cpu_to_le32(arg->link_num[i].home_freq);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d",
+ i, arg->link_num[i].num_of_link,
+ arg->link_num[i].vdev_type,
+ arg->link_num[i].vdev_subtype,
+ arg->link_num[i].home_freq,
+ __le32_to_cpu(cmd->ctrl_flags));
+ link_num_param++;
+ }
+
+ buf_ptr += sizeof(*link_num_param) * num_link_num_param;
+ }
+
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
+ sizeof(*vdev_bitmap) * num_vdev_bitmap);
+ buf_ptr += TLV_HDR_SIZE;
+
+ if (num_vdev_bitmap) {
+ vdev_bitmap = buf_ptr;
+ for (i = 0; i < num_vdev_bitmap; i++) {
+ vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]);
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x",
+ i, arg->vdev_bitmap[i]);
+ }
+
+ buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap;
+ }
+
+ if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
+ sizeof(*vdev_bitmap) *
+ num_inactive_vdev_bitmap);
+ buf_ptr += TLV_HDR_SIZE;
+
+ if (num_inactive_vdev_bitmap) {
+ vdev_bitmap = buf_ptr;
+ for (i = 0; i < num_inactive_vdev_bitmap; i++) {
+ vdev_bitmap[i] =
+ cpu_to_le32(arg->inactive_vdev_bitmap[i]);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "entry %d inactive_vdev_id_bitmap 0x%x",
+ i, arg->inactive_vdev_bitmap[i]);
+ }
+
+ buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
+ }
+ } else {
+ /* add empty vdev bitmap2 tlv */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+ buf_ptr += TLV_HDR_SIZE;
+ }
+
+ /* add empty ieee_link_id_bitmap tlv */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+ buf_ptr += TLV_HDR_SIZE;
+
+ /* add empty ieee_link_id_bitmap2 tlv */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+ buf_ptr += TLV_HDR_SIZE;
+
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ sizeof(*disallowed_mode_bmap) *
+ arg->num_disallow_mode_comb);
+ buf_ptr += TLV_HDR_SIZE;
+
+ ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg);
+ if (ret)
+ goto free_skb;
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret);
+ goto free_skb;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd");
+
+ return ret;
+
+free_skb:
+ dev_kfree_skb(skb);
+ return ret;
+}
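The len computation in ath12k_wmi_send_mlo_link_set_active_cmd() above sums one fixed command structure, six TLV array headers, and three variable-length arrays (link-number params, vdev bitmap, disallowed-mode bitmaps), plus an optional inactive-vdev bitmap. Below is a standalone sketch of the same arithmetic; all sizes are placeholders, not the real WMI structure sizes.

#include <stdio.h>

#define TLV_HDR_SIZE		8	/* placeholder */
#define CMD_SIZE		28	/* placeholder */
#define LINK_NUM_PARAM_SIZE	20	/* placeholder */
#define VDEV_BITMAP_SIZE	4
#define DISALLOW_BMAP_SIZE	12	/* placeholder */

int main(void)
{
	unsigned int n_link = 1, n_vdev = 1, n_inactive = 0, n_disallow = 1;
	unsigned int len;

	len = CMD_SIZE +
	      TLV_HDR_SIZE + LINK_NUM_PARAM_SIZE * n_link +
	      TLV_HDR_SIZE + VDEV_BITMAP_SIZE * n_vdev +
	      TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +	/* empty/optional arrays */
	      TLV_HDR_SIZE + DISALLOW_BMAP_SIZE * n_disallow +
	      VDEV_BITMAP_SIZE * n_inactive;	/* only for the ACTIVE_INACTIVE mode */

	printf("command buffer length: %u bytes\n", len);
	return 0;
}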
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index ac18f75e0449..c640ffa180c8 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -1974,6 +1974,7 @@ enum wmi_tlv_tag {
WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT,
WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
WMI_TAG_VDEV_CH_POWER_INFO,
+ WMI_TAG_MLO_LINK_SET_ACTIVE_CMD = 0x3BE,
WMI_TAG_EHT_RATE_SET = 0x3C4,
WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
WMI_TAG_MLO_TX_SEND_PARAMS,
@@ -2617,6 +2618,8 @@ struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params {
__le32 num_chainmask_tables;
} __packed;
+#define WMI_HW_MODE_CAP_CFG_TYPE GENMASK(27, 0)
+
struct ath12k_wmi_hw_mode_cap_params {
__le32 tlv_header;
__le32 hw_mode_id;
@@ -2666,6 +2669,12 @@ struct ath12k_wmi_mac_phy_caps_params {
__le32 he_cap_info_2g_ext;
__le32 he_cap_info_5g_ext;
__le32 he_cap_info_internal;
+ __le32 wireless_modes;
+ __le32 low_2ghz_chan_freq;
+ __le32 high_2ghz_chan_freq;
+ __le32 low_5ghz_chan_freq;
+ __le32 high_5ghz_chan_freq;
+ __le32 nss_ratio;
} __packed;
struct ath12k_wmi_hal_reg_caps_ext_params {
@@ -2739,6 +2748,11 @@ struct wmi_service_ready_ext2_event {
__le32 default_num_msduq_supported_per_tid;
} __packed;
+struct ath12k_wmi_dbs_or_sbs_cap_params {
+ __le32 hw_mode_id;
+ __le32 sbs_lower_band_end_freq;
+} __packed;
+
struct ath12k_wmi_caps_ext_params {
__le32 hw_mode_id;
__le32 pdev_and_hw_link_ids;
@@ -5049,6 +5063,53 @@ struct ath12k_wmi_pdev {
u32 rx_decap_mode;
};
+struct ath12k_hw_mode_freq_range_arg {
+ u32 low_2ghz_freq;
+ u32 high_2ghz_freq;
+ u32 low_5ghz_freq;
+ u32 high_5ghz_freq;
+};
+
+struct ath12k_svc_ext_mac_phy_info {
+ enum wmi_host_hw_mode_config_type hw_mode_config_type;
+ u32 phy_id;
+ u32 supported_bands;
+ struct ath12k_hw_mode_freq_range_arg hw_freq_range;
+};
+
+#define ATH12K_MAX_MAC_PHY_CAP 8
+
+struct ath12k_svc_ext_info {
+ u32 num_hw_modes;
+ struct ath12k_svc_ext_mac_phy_info mac_phy_info[ATH12K_MAX_MAC_PHY_CAP];
+};
+
+/**
+ * enum ath12k_hw_mode - enum for host mode
+ * @ATH12K_HW_MODE_SMM: Single mac mode
+ * @ATH12K_HW_MODE_DBS: DBS mode
+ * @ATH12K_HW_MODE_SBS: SBS mode with either high share or low share
+ * @ATH12K_HW_MODE_SBS_UPPER_SHARE: Higher 5 GHz shared with 2.4 GHz
+ * @ATH12K_HW_MODE_SBS_LOWER_SHARE: Lower 5 GHz shared with 2.4 GHz
+ * @ATH12K_HW_MODE_MAX: Max, used to indicate invalid mode
+ */
+enum ath12k_hw_mode {
+ ATH12K_HW_MODE_SMM,
+ ATH12K_HW_MODE_DBS,
+ ATH12K_HW_MODE_SBS,
+ ATH12K_HW_MODE_SBS_UPPER_SHARE,
+ ATH12K_HW_MODE_SBS_LOWER_SHARE,
+ ATH12K_HW_MODE_MAX,
+};
+
+struct ath12k_hw_mode_info {
+ bool support_dbs:1;
+ bool support_sbs:1;
+
+ struct ath12k_hw_mode_freq_range_arg freq_range_caps[ATH12K_HW_MODE_MAX]
+ [MAX_RADIOS];
+};
+
struct ath12k_wmi_base {
struct ath12k_base *ab;
struct ath12k_wmi_pdev wmi[MAX_RADIOS];
@@ -5066,6 +5127,10 @@ struct ath12k_wmi_base {
enum wmi_host_hw_mode_config_type preferred_hw_mode;
struct ath12k_wmi_target_cap_arg *targ_cap;
+
+ struct ath12k_svc_ext_info svc_ext_info;
+ u32 sbs_lower_band_end_freq;
+ struct ath12k_hw_mode_info hw_mode_info;
};
struct wmi_pdev_set_bios_interface_cmd {
@@ -5997,6 +6062,118 @@ struct wmi_vdev_set_tpc_power_cmd {
*/
} __packed;
+#define CRTL_F_DYNC_FORCE_LINK_NUM GENMASK(3, 2)
+
+struct wmi_mlo_link_set_active_cmd {
+ __le32 tlv_header;
+ __le32 force_mode;
+ __le32 reason;
+ __le32 use_ieee_link_id_bitmap;
+ struct ath12k_wmi_mac_addr_params ap_mld_mac_addr;
+ __le32 ctrl_flags;
+} __packed;
+
+struct wmi_mlo_set_active_link_number_params {
+ __le32 tlv_header;
+ __le32 num_of_link;
+ __le32 vdev_type;
+ __le32 vdev_subtype;
+ __le32 home_freq;
+} __packed;
+
+#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1 GENMASK(7, 0)
+#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2 GENMASK(15, 8)
+#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3 GENMASK(23, 16)
+#define WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4 GENMASK(31, 24)
+
+struct wmi_disallowed_mlo_mode_bitmap_params {
+ __le32 tlv_header;
+ __le32 disallowed_mode_bitmap;
+ __le32 ieee_link_id_comb;
+} __packed;
+
+enum wmi_mlo_link_force_mode {
+ WMI_MLO_LINK_FORCE_MODE_ACTIVE = 1,
+ WMI_MLO_LINK_FORCE_MODE_INACTIVE = 2,
+ WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM = 3,
+ WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM = 4,
+ WMI_MLO_LINK_FORCE_MODE_NO_FORCE = 5,
+ WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE = 6,
+ WMI_MLO_LINK_FORCE_MODE_NON_FORCE_UPDATE = 7,
+};
+
+enum wmi_mlo_link_force_reason {
+ WMI_MLO_LINK_FORCE_REASON_NEW_CONNECT = 1,
+ WMI_MLO_LINK_FORCE_REASON_NEW_DISCONNECT = 2,
+ WMI_MLO_LINK_FORCE_REASON_LINK_REMOVAL = 3,
+ WMI_MLO_LINK_FORCE_REASON_TDLS = 4,
+ WMI_MLO_LINK_FORCE_REASON_REVERT_FAILURE = 5,
+ WMI_MLO_LINK_FORCE_REASON_LINK_DELETE = 6,
+ WMI_MLO_LINK_FORCE_REASON_SINGLE_LINK_EMLSR_OP = 7,
+};
+
+struct wmi_mlo_link_num_arg {
+ u32 num_of_link;
+ u32 vdev_type;
+ u32 vdev_subtype;
+ u32 home_freq;
+};
+
+struct wmi_mlo_control_flags_arg {
+ bool overwrite_force_active_bitmap;
+ bool overwrite_force_inactive_bitmap;
+ bool dync_force_link_num;
+ bool post_re_evaluate;
+ u8 post_re_evaluate_loops;
+ bool dont_reschedule_workqueue;
+};
+
+struct wmi_ml_link_force_cmd_arg {
+ u8 ap_mld_mac_addr[ETH_ALEN];
+ u16 ieee_link_id_bitmap;
+ u16 ieee_link_id_bitmap2;
+ u8 link_num;
+};
+
+struct wmi_ml_disallow_mode_bmap_arg {
+ u32 disallowed_mode;
+ union {
+ u32 ieee_link_id_comb;
+ u8 ieee_link_id[4];
+ };
+};
+
+/* maximum size of link number param array
+ * for MLO link set active command
+ */
+#define WMI_MLO_LINK_NUM_SZ 2
+
+/* maximum size of vdev bitmap array for
+ * MLO link set active command
+ */
+#define WMI_MLO_VDEV_BITMAP_SZ 2
+
+/* Max number of disallowed bitmap combinations
+ * sent to firmware
+ */
+#define WMI_ML_MAX_DISALLOW_BMAP_COMB 4
+
+struct wmi_mlo_link_set_active_arg {
+ enum wmi_mlo_link_force_mode force_mode;
+ enum wmi_mlo_link_force_reason reason;
+ u32 num_link_entry;
+ u32 num_vdev_bitmap;
+ u32 num_inactive_vdev_bitmap;
+ struct wmi_mlo_link_num_arg link_num[WMI_MLO_LINK_NUM_SZ];
+ u32 vdev_bitmap[WMI_MLO_VDEV_BITMAP_SZ];
+ u32 inactive_vdev_bitmap[WMI_MLO_VDEV_BITMAP_SZ];
+ struct wmi_mlo_control_flags_arg ctrl_flags;
+ bool use_ieee_link_id;
+ struct wmi_ml_link_force_cmd_arg force_cmd;
+ u32 num_disallow_mode_comb;
+ struct wmi_ml_disallow_mode_bmap_arg disallow_bmap[WMI_ML_MAX_DISALLOW_BMAP_COMB];
+};
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -6195,5 +6372,6 @@ bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar);
int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
u32 vdev_id,
struct ath12k_reg_tpc_power_info *param);
-
+int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
+ struct wmi_mlo_link_set_active_arg *param);
#endif
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index af98e871199d..5a9e93fd1ef4 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -87,7 +87,9 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
* We need to do some backwards compatibility to make this work.
*/
if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
- WARN_ON(1);
+ ath6kl_err("mismatched byte count %d vs. expected %zd\n",
+ le32_to_cpu(targ_info->byte_count),
+ sizeof(*targ_info));
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index a3e03580cd9f..564ca6a61985 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -438,14 +438,21 @@ static void carl9170_usb_rx_complete(struct urb *urb)
if (atomic_read(&ar->rx_anch_urbs) == 0) {
/*
- * The system is too slow to cope with
- * the enormous workload. We have simply
- * run out of active rx urbs and this
- * unfortunately leads to an unpredictable
- * device.
+ * At this point, either the system is too slow to
+ * cope with the enormous workload (so we have simply
+ * run out of active rx urbs and this unfortunately
+ * leads to an unpredictable device), or the device
+ * is not fully functional after an unsuccessful
+ * firmware loading attempt (so it doesn't pass
+ * ieee80211_register_hw() and there is no internal
+ * workqueue at all).
*/
- ieee80211_queue_work(ar->hw, &ar->ping_work);
+ if (ar->registered)
+ ieee80211_queue_work(ar->hw, &ar->ping_work);
+ else
+ pr_warn_once("device %s is not registered\n",
+ dev_name(&ar->udev->dev));
}
} else {
/*
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 67172385a5d6..89d4394cedcf 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -179,9 +179,11 @@ void wil_mask_irq(struct wil6210_priv *wil)
wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
- wil6210_mask_irq_tx_edma(wil);
+ if (wil->use_enhanced_dma_hw)
+ wil6210_mask_irq_tx_edma(wil);
wil6210_mask_irq_rx(wil);
- wil6210_mask_irq_rx_edma(wil);
+ if (wil->use_enhanced_dma_hw)
+ wil6210_mask_irq_rx_edma(wil);
wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
@@ -190,10 +192,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "unmask_irq\n");
- wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
- WIL_ICR_ICC_VALUE);
- wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
- WIL_ICR_ICC_VALUE);
+ if (wil->use_enhanced_dma_hw) {
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ }
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_MISC_VALUE);
wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
@@ -845,10 +849,12 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
- wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
- offsetof(struct RGF_ICR, ICR));
- wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ if (wil->use_enhanced_dma_hw) {
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ }
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
wmb(); /* make sure write completed */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 0e5130d1fccd..031d88bf6393 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -203,7 +203,8 @@ il4965_rs_extract_rate(u32 rate_n_flags)
return (u8) (rate_n_flags & 0xFF);
}
-static void
+/* noinline works around https://github.com/llvm/llvm-project/issues/143908 */
+static noinline_for_stack void
il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
{
win->data = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index dbfd45948e8b..66211426aa3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1316,6 +1316,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
sizeof(trans->conf.no_reclaim_cmds));
memcpy(trans->conf.no_reclaim_cmds, no_reclaim_cmds,
sizeof(no_reclaim_cmds));
+ trans->conf.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
switch (iwlwifi_mod_params.amsdu_size) {
case IWL_AMSDU_DEF:
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index e8820e7cf8fa..1774bb84dd3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -77,6 +77,7 @@ void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
/* Setup async RX handling */
spin_lock_init(&mld->async_handlers_lock);
+ INIT_LIST_HEAD(&mld->async_handlers_list);
wiphy_work_init(&mld->async_handlers_wk,
iwl_mld_async_handlers_wk);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
index 81ca9ff67be9..3f8b840871d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
@@ -32,9 +32,9 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
unsigned int link_id;
int cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(MAC_CONF_GROUP,
- MAC_CONFIG_CMD), 0);
+ MAC_CONFIG_CMD), 1);
- if (WARN_ON(cmd_ver < 1 && cmd_ver > 3))
+ if (WARN_ON(cmd_ver > 3))
return;
cmd->id_and_color = cpu_to_le32(mvmvif->id);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index cb36baac14da..4f2be0c1bd97 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -166,7 +166,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_context_info *ctxt_info;
struct iwl_context_info_rbd_cfg *rx_cfg;
- u32 control_flags = 0, rb_size;
+ u32 control_flags = 0, rb_size, cb_size;
dma_addr_t phys;
int ret;
@@ -202,11 +202,12 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
}
- WARN_ON(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)) > 12);
+ cb_size = RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans));
+ if (WARN_ON(cb_size > 12))
+ cb_size = 12;
+
control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
- control_flags |=
- u32_encode_bits(RX_QUEUE_CB_SIZE(iwl_trans_get_num_rbds(trans)),
- IWL_CTXT_INFO_RB_CB_SIZE);
+ control_flags |= u32_encode_bits(cb_size, IWL_CTXT_INFO_RB_CB_SIZE);
control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
ctxt_info->control.control_flags = cpu_to_le32(control_flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 656f8b06c27b..0a9e0dbb58fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1501,11 +1501,27 @@ static int _iwl_pci_resume(struct device *device, bool restore)
* Scratch value was altered, this means the device was powered off, we
* need to reset it completely.
* Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan,
- * so assume that any bits there mean that the device is usable.
+ * but not bits [15:8]. So if we have bits set in the lower word, assume
+ * the device is alive.
+ * For older devices, just try silently to grab the NIC.
*/
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ &&
- !iwl_read32(trans, CSR_FUNC_SCRATCH))
- device_was_powered_off = true;
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) &
+ CSR_FUNC_SCRATCH_POWER_OFF_MASK))
+ device_was_powered_off = true;
+ } else {
+ /*
+ * bh are re-enabled by iwl_trans_pcie_release_nic_access,
+ * so re-enable them if _iwl_trans_pcie_grab_nic_access fails.
+ */
+ local_bh_disable();
+ if (_iwl_trans_pcie_grab_nic_access(trans, true)) {
+ iwl_trans_pcie_release_nic_access(trans);
+ } else {
+ device_was_powered_off = true;
+ local_bh_enable();
+ }
+ }
if (restore || device_was_powered_off) {
trans->state = IWL_TRANS_NO_FW;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 738bafc3749b..66f0f5377ac1 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -403,14 +403,12 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
bss_desc->bcn_ht_oper->ht_param &
- IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) {
- chan_list->chan_scan_param[0].radio_type |=
- CHAN_BW_40MHZ << 2;
+ IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
radio_type,
(bss_desc->bcn_ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
- }
+
*buffer += struct_size(chan_list, chan_scan_param, 1);
ret_len += struct_size(chan_list, chan_scan_param, 1);
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 92697f98c601..e533d791955d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2015,21 +2015,41 @@ static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
}
-static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
- struct nvme_id_ns *id, struct queue_limits *lim,
- u32 bs, u32 atomic_bs)
+static u32 nvme_configure_atomic_write(struct nvme_ns *ns,
+ struct nvme_id_ns *id, struct queue_limits *lim, u32 bs)
{
- unsigned int boundary = 0;
+ u32 atomic_bs, boundary = 0;
- if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) {
- if (le16_to_cpu(id->nabspf))
+ /*
+ * We do not support an offset for the atomic boundaries.
+ */
+ if (id->nabo)
+ return bs;
+
+ if ((id->nsfeat & NVME_NS_FEAT_ATOMICS) && id->nawupf) {
+ /*
+ * Use the per-namespace atomic write unit when available.
+ */
+ atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
+ if (id->nabspf)
boundary = (le16_to_cpu(id->nabspf) + 1) * bs;
+ } else {
+ /*
+ * Use the controller wide atomic write unit. This sucks
+ * because the limit is defined in terms of logical blocks while
+ * namespaces can have different formats, and because there is
+ * no clear language in the specification prohibiting different
+ * values for different controllers in the subsystem.
+ */
+ atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
}
+
lim->atomic_write_hw_max = atomic_bs;
lim->atomic_write_hw_boundary = boundary;
lim->atomic_write_hw_unit_min = bs;
lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
lim->features |= BLK_FEAT_ATOMIC_WRITES;
+ return atomic_bs;
}
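The NAWUPF/AWUPF and NABSPF fields used by nvme_configure_atomic_write() above are 0's based, so the usable atomic unit and boundary are (value + 1) logical blocks. A standalone sketch of that arithmetic with illustrative numbers, not taken from any particular device.

#include <stdio.h>

int main(void)
{
	unsigned int bs = 4096;		/* logical block size in bytes */
	unsigned int nawupf = 7;	/* 0's based: 8 blocks per atomic write */
	unsigned int nabspf = 15;	/* 0's based: 16-block boundary */

	unsigned int atomic_bs = (1 + nawupf) * bs;
	unsigned int boundary = (nabspf + 1) * bs;

	printf("atomic write unit %u bytes, boundary %u bytes\n",
	       atomic_bs, boundary);
	return 0;
}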
static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
@@ -2067,34 +2087,8 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
valid = false;
}
- atomic_bs = phys_bs = bs;
- if (id->nabo == 0) {
- /*
- * Bit 1 indicates whether NAWUPF is defined for this namespace
- * and whether it should be used instead of AWUPF. If NAWUPF ==
- * 0 then AWUPF must be used instead.
- */
- if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
- atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
- else
- atomic_bs = (1 + ns->ctrl->awupf) * bs;
-
- /*
- * Set subsystem atomic bs.
- */
- if (ns->ctrl->subsys->atomic_bs) {
- if (atomic_bs != ns->ctrl->subsys->atomic_bs) {
- dev_err_ratelimited(ns->ctrl->device,
- "%s: Inconsistent Atomic Write Size, Namespace will not be added: Subsystem=%d bytes, Controller/Namespace=%d bytes\n",
- ns->disk ? ns->disk->disk_name : "?",
- ns->ctrl->subsys->atomic_bs,
- atomic_bs);
- }
- } else
- ns->ctrl->subsys->atomic_bs = atomic_bs;
-
- nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs);
- }
+ phys_bs = bs;
+ atomic_bs = nvme_configure_atomic_write(ns, id, lim, bs);
if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
/* NPWG = Namespace Preferred Write Granularity */
@@ -2382,16 +2376,6 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (!nvme_update_disk_info(ns, id, &lim))
capacity = 0;
- /*
- * Validate the max atomic write size fits within the subsystem's
- * atomic write capabilities.
- */
- if (lim.atomic_write_hw_max > ns->ctrl->subsys->atomic_bs) {
- blk_mq_unfreeze_queue(ns->disk->queue, memflags);
- ret = -ENXIO;
- goto out;
- }
-
nvme_config_discard(ns, &lim);
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
ns->head->ids.csi == NVME_CSI_ZNS)
@@ -3215,6 +3199,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
memcpy(subsys->model, id->mn, sizeof(subsys->model));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
+ subsys->awupf = le16_to_cpu(id->awupf);
/* Versions prior to 1.4 don't necessarily report a valid type */
if (id->cntrltype == NVME_CTRL_DISC ||
@@ -3552,6 +3537,15 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret)
goto out_free;
}
+
+ if (le16_to_cpu(id->awupf) != ctrl->subsys->awupf) {
+ dev_err_ratelimited(ctrl->device,
+ "inconsistent AWUPF, controller not added (%u/%u).\n",
+ le16_to_cpu(id->awupf), ctrl->subsys->awupf);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
memcpy(ctrl->subsys->firmware_rev, id->fr,
sizeof(ctrl->subsys->firmware_rev));
@@ -3647,7 +3641,6 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
dev_pm_qos_expose_latency_tolerance(ctrl->device);
else if (!ctrl->apst_enabled && prev_apst_enabled)
dev_pm_qos_hide_latency_tolerance(ctrl->device);
- ctrl->awupf = le16_to_cpu(id->awupf);
out_free:
kfree(id);
return ret;
@@ -4036,6 +4029,10 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
list_add_tail_rcu(&ns->siblings, &head->list);
ns->head = head;
mutex_unlock(&ctrl->subsys->lock);
+
+#ifdef CONFIG_NVME_MULTIPATH
+ cancel_delayed_work(&head->remove_work);
+#endif
return 0;
out_put_ns_head:
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 0b50da2f1175..6b3ac8ae3f34 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -429,21 +429,14 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
/*
- * For iopoll, complete it directly. Note that using the uring_cmd
- * helper for this is safe only because we check blk_rq_is_poll().
- * As that returns false if we're NOT on a polled queue, then it's
- * safe to use the polled completion helper.
- *
- * Otherwise, move the completion to task work.
+ * IOPOLL could potentially complete this request directly, but
+ * if multiple rings are polling on the same queue, then it's possible
+ * for one ring to find completions for another ring. Punting the
+ * completion via task_work will always direct it to the right
+ * location, rather than potentially complete requests for ringA
+ * under iopoll invocations from ringB.
*/
- if (blk_rq_is_poll(req)) {
- if (pdu->bio)
- blk_rq_unmap_user(pdu->bio);
- io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
- } else {
- io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
- }
-
+ io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
return RQ_END_IO_FREE;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index e040e467f9fa..316a269842fa 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -1311,7 +1311,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
*/
if (!try_module_get(THIS_MODULE))
goto out;
- queue_delayed_work(nvme_wq, &head->remove_work,
+ mod_delayed_work(nvme_wq, &head->remove_work,
head->delayed_removal_secs * HZ);
} else {
list_del_init(&head->entry);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a468cdc5b5cb..7df2ea21851f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -410,7 +410,6 @@ struct nvme_ctrl {
enum nvme_ctrl_type cntrltype;
enum nvme_dctype dctype;
- u16 awupf; /* 0's based value. */
};
static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
@@ -443,11 +442,11 @@ struct nvme_subsystem {
u8 cmic;
enum nvme_subsys_type subtype;
u16 vendor_id;
+ u16 awupf; /* 0's based value. */
struct ida ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
enum nvme_iopolicy iopolicy;
#endif
- u32 atomic_bs;
};
/*
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index ebd342bda235..91d2d92717d9 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -771,7 +771,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
u16 ignored_events = PCI_EXP_SLTSTA_DLLSC;
if (!ctrl->inband_presence_disabled)
- ignored_events |= events & PCI_EXP_SLTSTA_PDC;
+ ignored_events |= PCI_EXP_SLTSTA_PDC;
events &= ~ignored_events;
pciehp_ignore_link_change(ctrl, pdev, irq, ignored_events);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index b78e0e417324..af370628e583 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -1676,19 +1676,24 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
return NULL;
root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
- if (!root_ops)
- goto free_ri;
+ if (!root_ops) {
+ kfree(ri);
+ return NULL;
+ }
ri->cfg = pci_acpi_setup_ecam_mapping(root);
- if (!ri->cfg)
- goto free_root_ops;
+ if (!ri->cfg) {
+ kfree(ri);
+ kfree(root_ops);
+ return NULL;
+ }
root_ops->release_info = pci_acpi_generic_release_info;
root_ops->prepare_resources = pci_acpi_root_prepare_resources;
root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
if (!bus)
- goto free_cfg;
+ return NULL;
/* If we must preserve the resource configuration, claim now */
host = pci_find_host_bridge(bus);
@@ -1705,14 +1710,6 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
pcie_bus_configure_settings(child);
return bus;
-
-free_cfg:
- pci_ecam_free(ri->cfg);
-free_root_ops:
- kfree(root_ops);
-free_ri:
- kfree(ri);
- return NULL;
}
void pcibios_add_bus(struct pci_bus *bus)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e9448d55113b..9e42090fb108 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3217,14 +3217,14 @@ void pci_pm_init(struct pci_dev *dev)
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
if (!pm)
- return;
+ goto poweron;
/* Check device's ability to generate PME# */
pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
pci_err(dev, "unsupported PM cap regs version (%u)\n",
pmc & PCI_PM_CAP_VER_MASK);
- return;
+ goto poweron;
}
dev->pm_cap = pm;
@@ -3269,6 +3269,7 @@ void pci_pm_init(struct pci_dev *dev)
pci_read_config_word(dev, PCI_STATUS, &status);
if (status & PCI_STATUS_IMM_READY)
dev->imm_ready = 1;
+poweron:
pci_pm_power_up_and_verify_state(dev);
pm_runtime_forbid(&dev->dev);
pm_runtime_set_active(&dev->dev);
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index ee5f615a9023..4bd73f038ffb 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -254,6 +254,7 @@ bool pcie_ptm_enabled(struct pci_dev *dev)
}
EXPORT_SYMBOL(pcie_ptm_enabled);
+#if IS_ENABLED(CONFIG_DEBUG_FS)
static ssize_t context_update_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -552,3 +553,4 @@ void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs)
debugfs_remove_recursive(ptm_debugfs->debugfs);
}
EXPORT_SYMBOL_GPL(pcie_ptm_destroy_debugfs);
+#endif
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index fe2d52e434db..8a2ef74862d3 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -374,11 +374,6 @@ static struct st_pio_control *st_get_pio_control(
}
/* Low level functions.. */
-static inline int st_gpio_bank(int gpio)
-{
- return gpio/ST_GPIO_PINS_PER_BANK;
-}
-
static inline int st_gpio_pin(int gpio)
{
return gpio%ST_GPIO_PINS_PER_BANK;
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index d6bb8f58978d..4edb20e61951 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -823,7 +823,7 @@ static struct platform_driver tb10x_pinctrl_pdrv = {
.remove = tb10x_pinctrl_remove,
.driver = {
.name = "tb10x_pinctrl",
- .of_match_table = of_match_ptr(tb10x_pinctrl_dt_ids),
+ .of_match_table = tb10x_pinctrl_dt_ids,
}
};
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c
index 20c3b9025044..3654913f1ae5 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c
@@ -629,7 +629,6 @@ static struct platform_driver apq8064_pinctrl_driver = {
.of_match_table = apq8064_pinctrl_of_match,
},
.probe = apq8064_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init apq8064_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8084.c b/drivers/pinctrl/qcom/pinctrl-apq8084.c
index 3fc0a40762b6..27693cd64881 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8084.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c
@@ -1207,7 +1207,6 @@ static struct platform_driver apq8084_pinctrl_driver = {
.of_match_table = apq8084_pinctrl_of_match,
},
.probe = apq8084_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init apq8084_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index 1f7944dd829d..6ede3149b6e1 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -710,7 +710,6 @@ static struct platform_driver ipq4019_pinctrl_driver = {
.of_match_table = ipq4019_pinctrl_of_match,
},
.probe = ipq4019_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq4019_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5018.c b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
index e2951f81c3ee..10b99d5d8a11 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
@@ -754,7 +754,6 @@ static struct platform_driver ipq5018_pinctrl_driver = {
.of_match_table = ipq5018_pinctrl_of_match,
},
.probe = ipq5018_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq5018_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5332.c b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
index 625f8014051f..1ac2fc09c119 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5332.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
@@ -834,7 +834,6 @@ static struct platform_driver ipq5332_pinctrl_driver = {
.of_match_table = ipq5332_pinctrl_of_match,
},
.probe = ipq5332_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq5332_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5424.c b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
index 0d610b076da3..7ff1f8acc1a3 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5424.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
@@ -791,7 +791,6 @@ static struct platform_driver ipq5424_pinctrl_driver = {
.of_match_table = ipq5424_pinctrl_of_match,
},
.probe = ipq5424_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq5424_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
index 0ad08647dbcd..a4ba980252e1 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq6018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
@@ -1080,7 +1080,6 @@ static struct platform_driver ipq6018_pinctrl_driver = {
.of_match_table = ipq6018_pinctrl_of_match,
},
.probe = ipq6018_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq6018_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
index e2bb94e86aef..0a9e357e64c6 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
@@ -631,7 +631,6 @@ static struct platform_driver ipq8064_pinctrl_driver = {
.of_match_table = ipq8064_pinctrl_of_match,
},
.probe = ipq8064_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq8064_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8074.c b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
index 337f3a1c92c1..482f13282fc2 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8074.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
@@ -1041,7 +1041,6 @@ static struct platform_driver ipq8074_pinctrl_driver = {
.of_match_table = ipq8074_pinctrl_of_match,
},
.probe = ipq8074_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq8074_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq9574.c b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
index e2491617b236..89c05d8eb550 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq9574.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
@@ -799,7 +799,6 @@ static struct platform_driver ipq9574_pinctrl_driver = {
.of_match_table = ipq9574_pinctrl_of_match,
},
.probe = ipq9574_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq9574_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9607.c b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
index e7cd3ef1cf3e..3e18ba124fed 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9607.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
@@ -1059,7 +1059,6 @@ static struct platform_driver mdm9607_pinctrl_driver = {
.of_match_table = mdm9607_pinctrl_of_match,
},
.probe = mdm9607_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init mdm9607_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
index 0a2ae383d3d5..bea1ca3d1b7f 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9615.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
@@ -446,7 +446,6 @@ static struct platform_driver mdm9615_pinctrl_driver = {
.of_match_table = mdm9615_pinctrl_of_match,
},
.probe = mdm9615_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init mdm9615_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index f012ea88aa22..5c4687de1464 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1442,7 +1442,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
girq->handler = handle_bad_irq;
girq->parents[0] = pctrl->irq;
- ret = gpiochip_add_data(&pctrl->chip, pctrl);
+ ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "Failed register gpiochip\n");
return ret;
@@ -1463,7 +1463,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
dev_name(pctrl->dev), 0, 0, chip->ngpio);
if (ret) {
dev_err(pctrl->dev, "Failed to add pin range\n");
- gpiochip_remove(&pctrl->chip);
return ret;
}
}
@@ -1599,13 +1598,5 @@ int msm_pinctrl_probe(struct platform_device *pdev,
}
EXPORT_SYMBOL(msm_pinctrl_probe);
-void msm_pinctrl_remove(struct platform_device *pdev)
-{
- struct msm_pinctrl *pctrl = platform_get_drvdata(pdev);
-
- gpiochip_remove(&pctrl->chip);
-}
-EXPORT_SYMBOL(msm_pinctrl_remove);
-
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. TLMM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 63852ed70295..d7dc0947bb16 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -171,6 +171,5 @@ extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
int msm_pinctrl_probe(struct platform_device *pdev,
const struct msm_pinctrl_soc_data *soc_data);
-void msm_pinctrl_remove(struct platform_device *pdev);
#endif
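The two pinctrl-msm hunks above are what make every ".remove = msm_pinctrl_remove" deletion in the per-SoC drivers below possible: once the gpiochip is registered with devm_gpiochip_add_data(), the driver core unregisters it automatically on unbind, so neither the error path nor a remove() callback needs a manual gpiochip_remove(), and the msm_pinctrl_remove() declaration can go away. A kernel-style sketch (not a standalone program), condensed from msm_gpio_init() in the hunk above:

static int msm_gpio_init_sketch(struct msm_pinctrl *pctrl)
{
	struct gpio_chip *chip = &pctrl->chip;
	int ret;

	ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
	if (ret)
		return ret;                     /* nothing to unwind */

	ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev),
				     0, 0, chip->ngpio);
	if (ret)
		return ret;                     /* devres tears the chip down */

	return 0;
}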
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8226.c b/drivers/pinctrl/qcom/pinctrl-msm8226.c
index 64fee70f1772..f9a957347340 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8226.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8226.c
@@ -654,7 +654,6 @@ static struct platform_driver msm8226_pinctrl_driver = {
.of_match_table = msm8226_pinctrl_of_match,
},
.probe = msm8226_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8226_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c
index 999a5f867eb5..4dbc19ffd80e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c
@@ -981,7 +981,6 @@ static struct platform_driver msm8660_pinctrl_driver = {
.of_match_table = msm8660_pinctrl_of_match,
},
.probe = msm8660_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8660_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c
index 756856d20d6b..0aa4f77b774f 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8909.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c
@@ -929,7 +929,6 @@ static struct platform_driver msm8909_pinctrl_driver = {
.of_match_table = msm8909_pinctrl_of_match,
},
.probe = msm8909_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8909_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index cea5c54f92fe..0dfc6dd33d58 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -969,7 +969,6 @@ static struct platform_driver msm8916_pinctrl_driver = {
.of_match_table = msm8916_pinctrl_of_match,
},
.probe = msm8916_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8916_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8917.c b/drivers/pinctrl/qcom/pinctrl-msm8917.c
index 350636807b07..2e1a94ab18b2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8917.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8917.c
@@ -1607,7 +1607,6 @@ static struct platform_driver msm8917_pinctrl_driver = {
.of_match_table = msm8917_pinctrl_of_match,
},
.probe = msm8917_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8917_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8953.c b/drivers/pinctrl/qcom/pinctrl-msm8953.c
index 998351bdfee1..956383341a7a 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8953.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8953.c
@@ -1816,7 +1816,6 @@ static struct platform_driver msm8953_pinctrl_driver = {
.of_match_table = msm8953_pinctrl_of_match,
},
.probe = msm8953_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8953_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c
index ebe230b3b437..a937ea867de7 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c
@@ -1246,7 +1246,6 @@ static struct platform_driver msm8960_pinctrl_driver = {
.of_match_table = msm8960_pinctrl_of_match,
},
.probe = msm8960_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8960_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index c30d80e4e98c..3bcb03387781 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -1096,7 +1096,6 @@ static struct platform_driver msm8976_pinctrl_driver = {
.of_match_table = msm8976_pinctrl_of_match,
},
.probe = msm8976_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8976_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8994.c b/drivers/pinctrl/qcom/pinctrl-msm8994.c
index b1a6759ab4a5..7a3b6cbccb68 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8994.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8994.c
@@ -1343,7 +1343,6 @@ static struct platform_driver msm8994_pinctrl_driver = {
.of_match_table = msm8994_pinctrl_of_match,
},
.probe = msm8994_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8994_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8996.c b/drivers/pinctrl/qcom/pinctrl-msm8996.c
index 1b5d80eaab83..d86d83106d3b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8996.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8996.c
@@ -1920,7 +1920,6 @@ static struct platform_driver msm8996_pinctrl_driver = {
.of_match_table = msm8996_pinctrl_of_match,
},
.probe = msm8996_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8996_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8998.c b/drivers/pinctrl/qcom/pinctrl-msm8998.c
index b7cbf32b3125..1daee815888f 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8998.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8998.c
@@ -1535,7 +1535,6 @@ static struct platform_driver msm8998_pinctrl_driver = {
.of_match_table = msm8998_pinctrl_of_match,
},
.probe = msm8998_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8998_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
index 238c83f6ec4f..8253aa25775b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
@@ -1083,7 +1083,6 @@ static struct platform_driver msm8x74_pinctrl_driver = {
.of_match_table = msm8x74_pinctrl_of_match,
},
.probe = msm8x74_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8x74_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
index f885af571ec9..eeeec6434f6a 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
@@ -167,6 +167,10 @@ static const struct pinctrl_pin_desc qcm2290_pins[] = {
PINCTRL_PIN(62, "GPIO_62"),
PINCTRL_PIN(63, "GPIO_63"),
PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
PINCTRL_PIN(69, "GPIO_69"),
PINCTRL_PIN(70, "GPIO_70"),
PINCTRL_PIN(71, "GPIO_71"),
@@ -181,12 +185,17 @@ static const struct pinctrl_pin_desc qcm2290_pins[] = {
PINCTRL_PIN(80, "GPIO_80"),
PINCTRL_PIN(81, "GPIO_81"),
PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
PINCTRL_PIN(86, "GPIO_86"),
PINCTRL_PIN(87, "GPIO_87"),
PINCTRL_PIN(88, "GPIO_88"),
PINCTRL_PIN(89, "GPIO_89"),
PINCTRL_PIN(90, "GPIO_90"),
PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
PINCTRL_PIN(94, "GPIO_94"),
PINCTRL_PIN(95, "GPIO_95"),
PINCTRL_PIN(96, "GPIO_96"),
@@ -1125,7 +1134,6 @@ static struct platform_driver qcm2290_pinctrl_driver = {
.of_match_table = qcm2290_pinctrl_of_match,
},
.probe = qcm2290_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcm2290_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index ae7224012f8a..54e3b4435349 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -1644,7 +1644,6 @@ static struct platform_driver qcs404_pinctrl_driver = {
.of_match_table = qcs404_pinctrl_of_match,
},
.probe = qcs404_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcs404_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c
index 17ca743c2210..2a943bc46a62 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs615.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c
@@ -1087,7 +1087,6 @@ static struct platform_driver qcs615_tlmm_driver = {
.of_match_table = qcs615_tlmm_of_match,
},
.probe = qcs615_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcs615_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs8300.c b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
index 5f5f7c4ac644..d6437e26392b 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs8300.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
@@ -1227,7 +1227,6 @@ static struct platform_driver qcs8300_pinctrl_driver = {
.of_match_table = qcs8300_pinctrl_of_match,
},
.probe = qcs8300_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcs8300_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
index b5808fcfb13c..9ecc4d40e4dc 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
@@ -145,7 +145,6 @@ static struct platform_driver qdf2xxx_pinctrl_driver = {
.acpi_match_table = qdf2xxx_acpi_ids,
},
.probe = qdf2xxx_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qdf2xxx_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qdu1000.c b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
index 47bc529ef550..eacb89fa3888 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdu1000.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
@@ -1248,7 +1248,6 @@ static struct platform_driver qdu1000_tlmm_driver = {
.of_match_table = qdu1000_tlmm_of_match,
},
.probe = qdu1000_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qdu1000_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
index a5b38221aea8..1b62eb3e6620 100644
--- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
@@ -1540,7 +1540,6 @@ static struct platform_driver sa8775p_pinctrl_driver = {
.of_match_table = sa8775p_pinctrl_of_match,
},
.probe = sa8775p_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sa8775p_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sar2130p.c b/drivers/pinctrl/qcom/pinctrl-sar2130p.c
index 19a2e37826c7..3dd1b5e5cfee 100644
--- a/drivers/pinctrl/qcom/pinctrl-sar2130p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sar2130p.c
@@ -1486,7 +1486,6 @@ static struct platform_driver sar2130p_tlmm_driver = {
.of_match_table = sar2130p_tlmm_of_match,
},
.probe = sar2130p_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sar2130p_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index 6eb0c73791c0..c43fe10b71ad 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -1159,7 +1159,6 @@ static struct platform_driver sc7180_pinctrl_driver = {
.of_match_table = sc7180_pinctrl_of_match,
},
.probe = sc7180_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc7180_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 0c10eeb60b55..1b070e9d41f5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1505,7 +1505,6 @@ static struct platform_driver sc7280_pinctrl_driver = {
.of_match_table = sc7280_pinctrl_of_match,
},
.probe = sc7280_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc7280_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index d6a79ad41a40..26dd165d1543 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -1720,7 +1720,6 @@ static struct platform_driver sc8180x_pinctrl_driver = {
.acpi_match_table = sc8180x_pinctrl_acpi_match,
},
.probe = sc8180x_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc8180x_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
index 96f4fb5a5d29..6ccd7e5648d4 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
@@ -1926,7 +1926,6 @@ static struct platform_driver sc8280xp_pinctrl_driver = {
.of_match_table = sc8280xp_pinctrl_of_match,
},
.probe = sc8280xp_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc8280xp_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
index 907e4ffca5e7..1a78288f1bc8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm660.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -1442,7 +1442,6 @@ static struct platform_driver sdm660_pinctrl_driver = {
.of_match_table = sdm660_pinctrl_of_match,
},
.probe = sdm660_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdm660_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index c76183ba95e1..0fe1fa94cd6d 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -1337,7 +1337,6 @@ static struct platform_driver sdm670_pinctrl_driver = {
.of_match_table = sdm670_pinctrl_of_match,
},
.probe = sdm670_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdm670_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index cc05c415ed15..0446e291aa48 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1351,7 +1351,6 @@ static struct platform_driver sdm845_pinctrl_driver = {
.acpi_match_table = ACPI_PTR(sdm845_pinctrl_acpi_match),
},
.probe = sdm845_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdm845_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
index 8826db9d21d0..2c17bf889146 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -990,7 +990,6 @@ static struct platform_driver sdx55_pinctrl_driver = {
.of_match_table = sdx55_pinctrl_of_match,
},
.probe = sdx55_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdx55_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx65.c b/drivers/pinctrl/qcom/pinctrl-sdx65.c
index f6f319c997fc..85b5c0206dbd 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx65.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx65.c
@@ -939,7 +939,6 @@ static struct platform_driver sdx65_pinctrl_driver = {
.of_match_table = sdx65_pinctrl_of_match,
},
.probe = sdx65_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdx65_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx75.c b/drivers/pinctrl/qcom/pinctrl-sdx75.c
index 3cfe8c7f04df..ab13a3a57a83 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx75.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx75.c
@@ -1124,7 +1124,6 @@ static struct platform_driver sdx75_pinctrl_driver = {
.of_match_table = sdx75_pinctrl_of_match,
},
.probe = sdx75_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdx75_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm4450.c b/drivers/pinctrl/qcom/pinctrl-sm4450.c
index 622f20e6f6f8..1ecdf1ab4f27 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm4450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm4450.c
@@ -994,7 +994,6 @@ static struct platform_driver sm4450_tlmm_driver = {
.of_match_table = sm4450_tlmm_of_match,
},
.probe = sm4450_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
MODULE_DEVICE_TABLE(of, sm4450_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115.c b/drivers/pinctrl/qcom/pinctrl-sm6115.c
index 4e91c75ad952..c273efa43996 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6115.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6115.c
@@ -907,7 +907,6 @@ static struct platform_driver sm6115_tlmm_driver = {
.of_match_table = sm6115_tlmm_of_match,
},
.probe = sm6115_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6115_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6125.c b/drivers/pinctrl/qcom/pinctrl-sm6125.c
index c188842047aa..5092f20e0c1b 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6125.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6125.c
@@ -1266,7 +1266,6 @@ static struct platform_driver sm6125_tlmm_driver = {
.of_match_table = sm6125_tlmm_of_match,
},
.probe = sm6125_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6125_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6350.c b/drivers/pinctrl/qcom/pinctrl-sm6350.c
index f3828c07b134..ba4686c86c54 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6350.c
@@ -1373,7 +1373,6 @@ static struct platform_driver sm6350_tlmm_driver = {
.of_match_table = sm6350_tlmm_of_match,
},
.probe = sm6350_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6350_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c
index c82c8516932e..49031571e65e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6375.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c
@@ -1516,7 +1516,6 @@ static struct platform_driver sm6375_tlmm_driver = {
.of_match_table = sm6375_tlmm_of_match,
},
.probe = sm6375_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6375_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c
index 3c7fd8af6635..6e89966cd70e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c
@@ -1255,7 +1255,6 @@ static struct platform_driver sm7150_tlmm_driver = {
.of_match_table = sm7150_tlmm_of_match,
},
.probe = sm7150_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm7150_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c
index 01aea9c70b7a..794ed99463f7 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c
@@ -1542,7 +1542,6 @@ static struct platform_driver sm8150_pinctrl_driver = {
.of_match_table = sm8150_pinctrl_of_match,
},
.probe = sm8150_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8150_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index e9961a49ff98..fb6f005d64f5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1351,7 +1351,6 @@ static struct platform_driver sm8250_pinctrl_driver = {
.of_match_table = sm8250_pinctrl_of_match,
},
.probe = sm8250_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8250_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
index 9c69458bd910..c8a3f39ce6f1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -1642,7 +1642,6 @@ static struct platform_driver sm8350_tlmm_driver = {
.of_match_table = sm8350_tlmm_of_match,
},
.probe = sm8350_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8350_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450.c b/drivers/pinctrl/qcom/pinctrl-sm8450.c
index d11bb1ee9e3d..f2e52d5a0f93 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450.c
@@ -1677,7 +1677,6 @@ static struct platform_driver sm8450_tlmm_driver = {
.of_match_table = sm8450_tlmm_of_match,
},
.probe = sm8450_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8450_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550.c b/drivers/pinctrl/qcom/pinctrl-sm8550.c
index 3c847d9cb5d9..1b4496cb39eb 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8550.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8550.c
@@ -1762,7 +1762,6 @@ static struct platform_driver sm8550_tlmm_driver = {
.of_match_table = sm8550_tlmm_of_match,
},
.probe = sm8550_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8550_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650.c b/drivers/pinctrl/qcom/pinctrl-sm8650.c
index 104708252d12..449a0077f4b1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8650.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8650.c
@@ -1742,7 +1742,6 @@ static struct platform_driver sm8650_tlmm_driver = {
.of_match_table = sm8650_tlmm_of_match,
},
.probe = sm8650_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8650_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8750.c b/drivers/pinctrl/qcom/pinctrl-sm8750.c
index b94fb4ee0ec3..8516693d1db5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8750.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8750.c
@@ -1711,7 +1711,6 @@ static struct platform_driver sm8750_tlmm_driver = {
.of_match_table = sm8750_tlmm_of_match,
},
.probe = sm8750_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8750_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
index 419cb8facb2f..d4b215f34c39 100644
--- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
@@ -1861,7 +1861,6 @@ static struct platform_driver x1e80100_pinctrl_driver = {
.of_match_table = x1e80100_pinctrl_of_match,
},
.probe = x1e80100_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init x1e80100_pinctrl_init(void)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
index 1833078f6877..4e34b0cd3b73 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
@@ -143,7 +143,7 @@ static struct sunxi_desc_pin *init_pins_table(struct device *dev,
*/
static int prepare_function_table(struct device *dev, struct device_node *pnode,
struct sunxi_desc_pin *pins, int npins,
- const u8 *irq_bank_muxes)
+ unsigned pin_base, const u8 *irq_bank_muxes)
{
struct device_node *node;
struct property *prop;
@@ -166,7 +166,7 @@ static int prepare_function_table(struct device *dev, struct device_node *pnode,
*/
for (i = 0; i < npins; i++) {
struct sunxi_desc_pin *pin = &pins[i];
- int bank = pin->pin.number / PINS_PER_BANK;
+ int bank = (pin->pin.number - pin_base) / PINS_PER_BANK;
if (irq_bank_muxes[bank]) {
pin->variant++;
@@ -211,7 +211,7 @@ static int prepare_function_table(struct device *dev, struct device_node *pnode,
last_bank = 0;
for (i = 0; i < npins; i++) {
struct sunxi_desc_pin *pin = &pins[i];
- int bank = pin->pin.number / PINS_PER_BANK;
+ int bank = (pin->pin.number - pin_base) / PINS_PER_BANK;
int lastfunc = pin->variant + 1;
int irq_mux = irq_bank_muxes[bank];
@@ -353,7 +353,7 @@ int sunxi_pinctrl_dt_table_init(struct platform_device *pdev,
return PTR_ERR(pins);
ret = prepare_function_table(&pdev->dev, pnode, pins, desc->npins,
- irq_bank_muxes);
+ desc->pin_base, irq_bank_muxes);
if (ret)
return ret;
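In the sunxi hunks, pin->pin.number is a global pin number that already includes the controller's pin_base, so dividing it directly by PINS_PER_BANK picks the wrong irq_bank_muxes[] slot on controllers whose first pin is not 0 (an R_PIO-style secondary controller, for example). A small standalone illustration with made-up numbers:

#include <stdio.h>

#define PINS_PER_BANK 32

static int bank_of(unsigned int pin_number, unsigned int pin_base)
{
	return (pin_number - pin_base) / PINS_PER_BANK;
}

int main(void)
{
	/* a controller whose first pin is global number 352:
	 * global pin 353 lives in local bank 0, not global bank 11 */
	printf("old: %d  new: %d\n", 353 / PINS_PER_BANK, bank_of(353, 352));
	return 0;
}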
diff --git a/drivers/platform/x86/amd/amd_isp4.c b/drivers/platform/x86/amd/amd_isp4.c
index 0cc01441bcbb..9f291aeb35f1 100644
--- a/drivers/platform/x86/amd/amd_isp4.c
+++ b/drivers/platform/x86/amd/amd_isp4.c
@@ -11,6 +11,7 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/soc/amd/isp4_misc.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/units.h>
@@ -151,7 +152,7 @@ MODULE_DEVICE_TABLE(acpi, amdisp_sensor_ids);
static inline bool is_isp_i2c_adapter(struct i2c_adapter *adap)
{
- return !strcmp(adap->owner->name, "i2c_designware_amdisp");
+ return !strcmp(adap->name, AMDISP_I2C_ADAP_NAME);
}
static void instantiate_isp_i2c_client(struct amdisp_platform *isp4_platform,
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index 538b36b97095..885e2f8136fd 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -97,7 +97,7 @@ static int __hsmp_send_message(struct hsmp_socket *sock, struct hsmp_message *ms
short_sleep = jiffies + msecs_to_jiffies(HSMP_SHORT_SLEEP);
timeout = jiffies + msecs_to_jiffies(HSMP_MSG_TIMEOUT);
- while (time_before(jiffies, timeout)) {
+ while (true) {
ret = sock->amd_hsmp_rdwr(sock, mbinfo->msg_resp_off, &mbox_status, HSMP_RD);
if (ret) {
dev_err(sock->dev, "Error %d reading mailbox status\n", ret);
@@ -106,6 +106,10 @@ static int __hsmp_send_message(struct hsmp_socket *sock, struct hsmp_message *ms
if (mbox_status != HSMP_STATUS_NOT_READY)
break;
+
+ if (!time_before(jiffies, timeout))
+ break;
+
if (time_before(jiffies, short_sleep))
usleep_range(50, 100);
else
@@ -210,13 +214,7 @@ int hsmp_send_message(struct hsmp_message *msg)
return -ENODEV;
sock = &hsmp_pdev.sock[msg->sock_ind];
- /*
- * The time taken by smu operation to complete is between
- * 10us to 1ms. Sometime it may take more time.
- * In SMP system timeout of 100 millisecs should
- * be enough for the previous thread to finish the operation
- */
- ret = down_timeout(&sock->hsmp_sem, msecs_to_jiffies(HSMP_MSG_TIMEOUT));
+ ret = down_interruptible(&sock->hsmp_sem);
if (ret < 0)
return ret;
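The rewritten hsmp loop samples the mailbox status before testing the deadline, so a thread that was scheduled out across the timeout still observes a response that arrived in the meantime instead of reporting a spurious timeout; the semaphore wait in hsmp_send_message() also becomes interruptible rather than timing out. A standalone model of the new loop ordering, with read_status() standing in for the mailbox read:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static int read_status(void) { return 1; }   /* pretend: response ready */

static bool deadline_passed(const struct timespec *deadline)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return now.tv_sec > deadline->tv_sec ||
	       (now.tv_sec == deadline->tv_sec && now.tv_nsec >= deadline->tv_nsec);
}

int main(void)
{
	struct timespec deadline;
	int status = 0;

	clock_gettime(CLOCK_MONOTONIC, &deadline);   /* deadline already reached */

	while (true) {
		status = read_status();     /* always sampled at least once */
		if (status)
			break;
		if (deadline_passed(&deadline))
			break;              /* give up only after a final read */
		/* sleep between polls here */
	}

	printf("status %d\n", status);
	return 0;
}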
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index 5c7c01f66cde..f292111bd065 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -225,6 +225,15 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"),
}
},
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */
+ {
+ .ident = "PCSpecialist Lafite Pro V 14M",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"),
+ }
+ },
{}
};
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index 37c7a57afee5..0b9b23eb7c2c 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -157,6 +157,8 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
return -ENOMEM;
}
+ memset_io(dev->smu_virt_addr, 0, sizeof(struct smu_metrics));
+
/* Start the logging */
amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, false);
amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, false);
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 76910601cac8..ef988605c4da 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -280,7 +280,7 @@ int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
dev_err(dev->dev, "Invalid CPU id: 0x%x", dev->cpu_id);
}
- dev->buf = kzalloc(dev->mtable_size, GFP_KERNEL);
+ dev->buf = devm_kzalloc(dev->dev, dev->mtable_size, GFP_KERNEL);
if (!dev->buf)
return -ENOMEM;
}
@@ -493,7 +493,6 @@ static void amd_pmf_remove(struct platform_device *pdev)
mutex_destroy(&dev->lock);
mutex_destroy(&dev->update_mutex);
mutex_destroy(&dev->cb_mutex);
- kfree(dev->buf);
}
static const struct attribute_group *amd_pmf_driver_groups[] = {
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index d3bd12ad036a..4f626ebcb619 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -358,30 +358,28 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
return -EINVAL;
/* re-alloc to the new buffer length of the policy binary */
- new_policy_buf = memdup_user(buf, length);
- if (IS_ERR(new_policy_buf))
- return PTR_ERR(new_policy_buf);
+ new_policy_buf = devm_kzalloc(dev->dev, length, GFP_KERNEL);
+ if (!new_policy_buf)
+ return -ENOMEM;
+
+ if (copy_from_user(new_policy_buf, buf, length)) {
+ devm_kfree(dev->dev, new_policy_buf);
+ return -EFAULT;
+ }
- kfree(dev->policy_buf);
+ devm_kfree(dev->dev, dev->policy_buf);
dev->policy_buf = new_policy_buf;
dev->policy_sz = length;
- if (!amd_pmf_pb_valid(dev)) {
- ret = -EINVAL;
- goto cleanup;
- }
+ if (!amd_pmf_pb_valid(dev))
+ return -EINVAL;
amd_pmf_hex_dump_pb(dev);
ret = amd_pmf_start_policy_engine(dev);
if (ret < 0)
- goto cleanup;
+ return ret;
return length;
-
-cleanup:
- kfree(dev->policy_buf);
- dev->policy_buf = NULL;
- return ret;
}
static const struct file_operations pb_fops = {
@@ -422,12 +420,12 @@ static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_
rc = tee_client_open_session(ctx, &sess_arg, NULL);
if (rc < 0 || sess_arg.ret != 0) {
pr_err("Failed to open TEE session err:%#x, rc:%d\n", sess_arg.ret, rc);
- return rc;
+ return rc ?: -EINVAL;
}
*id = sess_arg.session;
- return rc;
+ return 0;
}
static int amd_pmf_register_input_device(struct amd_pmf_dev *dev)
@@ -462,7 +460,9 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid)
dev->tee_ctx = tee_client_open_context(NULL, amd_pmf_amdtee_ta_match, NULL, NULL);
if (IS_ERR(dev->tee_ctx)) {
dev_err(dev->dev, "Failed to open TEE context\n");
- return PTR_ERR(dev->tee_ctx);
+ ret = PTR_ERR(dev->tee_ctx);
+ dev->tee_ctx = NULL;
+ return ret;
}
ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid);
@@ -502,9 +502,12 @@ out_ctx:
static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev)
{
+ if (!dev->tee_ctx)
+ return;
tee_shm_free(dev->fw_shm_pool);
tee_client_close_session(dev->tee_ctx, dev->session_id);
tee_client_close_context(dev->tee_ctx);
+ dev->tee_ctx = NULL;
}
int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
@@ -527,64 +530,45 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
ret = amd_pmf_set_dram_addr(dev, true);
if (ret)
- goto err_cancel_work;
+ return ret;
dev->policy_base = devm_ioremap_resource(dev->dev, dev->res);
- if (IS_ERR(dev->policy_base)) {
- ret = PTR_ERR(dev->policy_base);
- goto err_free_dram_buf;
- }
+ if (IS_ERR(dev->policy_base))
+ return PTR_ERR(dev->policy_base);
- dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
- if (!dev->policy_buf) {
- ret = -ENOMEM;
- goto err_free_dram_buf;
- }
+ dev->policy_buf = devm_kzalloc(dev->dev, dev->policy_sz, GFP_KERNEL);
+ if (!dev->policy_buf)
+ return -ENOMEM;
memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
if (!amd_pmf_pb_valid(dev)) {
dev_info(dev->dev, "No Smart PC policy present\n");
- ret = -EINVAL;
- goto err_free_policy;
+ return -EINVAL;
}
amd_pmf_hex_dump_pb(dev);
- dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
- if (!dev->prev_data) {
- ret = -ENOMEM;
- goto err_free_policy;
- }
+ dev->prev_data = devm_kzalloc(dev->dev, sizeof(*dev->prev_data), GFP_KERNEL);
+ if (!dev->prev_data)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
if (ret)
- goto err_free_prev_data;
+ return ret;
ret = amd_pmf_start_policy_engine(dev);
- switch (ret) {
- case TA_PMF_TYPE_SUCCESS:
- status = true;
- break;
- case TA_ERROR_CRYPTO_INVALID_PARAM:
- case TA_ERROR_CRYPTO_BIN_TOO_LARGE:
- amd_pmf_tee_deinit(dev);
- status = false;
- break;
- default:
- ret = -EINVAL;
- amd_pmf_tee_deinit(dev);
- goto err_free_prev_data;
- }
-
+ dev_dbg(dev->dev, "start policy engine ret: %d\n", ret);
+ status = ret == TA_PMF_TYPE_SUCCESS;
if (status)
break;
+ amd_pmf_tee_deinit(dev);
}
if (!status && !pb_side_load) {
ret = -EINVAL;
- goto err_free_prev_data;
+ goto err;
}
if (pb_side_load)
@@ -592,22 +576,12 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
ret = amd_pmf_register_input_device(dev);
if (ret)
- goto err_pmf_remove_pb;
+ goto err;
return 0;
-err_pmf_remove_pb:
- if (pb_side_load && dev->esbin)
- amd_pmf_remove_pb(dev);
- amd_pmf_tee_deinit(dev);
-err_free_prev_data:
- kfree(dev->prev_data);
-err_free_policy:
- kfree(dev->policy_buf);
-err_free_dram_buf:
- kfree(dev->buf);
-err_cancel_work:
- cancel_delayed_work_sync(&dev->pb_work);
+err:
+ amd_pmf_deinit_smart_pc(dev);
return ret;
}
@@ -621,11 +595,5 @@ void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev)
amd_pmf_remove_pb(dev);
cancel_delayed_work_sync(&dev->pb_work);
- kfree(dev->prev_data);
- dev->prev_data = NULL;
- kfree(dev->policy_buf);
- dev->policy_buf = NULL;
- kfree(dev->buf);
- dev->buf = NULL;
amd_pmf_tee_deinit(dev);
}
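Two threads run through the tee-if.c hunks: the policy and scratch buffers become devm allocations, which lets the old cleanup labels collapse into a single err path that calls amd_pmf_deinit_smart_pc(), and dev->tee_ctx is cleared after use so that path is safe to take more than once. The smaller but notable fix is in amd_pmf_ta_open_session(): tee_client_open_session() can return 0 while sess_arg.ret reports a TEE-level failure, and the old code returned that 0 as success. The GNU "elvis" operator keeps a real negative errno when there is one and substitutes -EINVAL otherwise; a standalone demonstration:

#include <errno.h>
#include <stdio.h>

static int pick_error(int rc)
{
	return rc ?: -EINVAL;    /* GNU extension, same as: rc ? rc : -EINVAL */
}

int main(void)
{
	printf("%d %d\n", pick_error(-5), pick_error(0));   /* prints: -5 -22 */
	return 0;
}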
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index c42f9228b0b2..20ec122a9fe0 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -119,7 +119,7 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"),
},
- .driver_data = &g_series_quirks,
+ .driver_data = &generic_quirks,
},
{
.ident = "Alienware m16 R2",
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index e30ca325938c..9dd9f2cb074f 100644
--- a/drivers/platform/x86/dell/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -45,7 +45,7 @@
MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
MODULE_LICENSE("GPL");
-MODULE_VERSION("3.2");
+MODULE_VERSION("3.3");
#define BIOS_SCAN_LIMIT 0xffffffff
#define MAX_IMAGE_LENGTH 16
@@ -91,7 +91,7 @@ static void init_packet_head(void)
rbu_data.imagesize = 0;
}
-static int create_packet(void *data, size_t length)
+static int create_packet(void *data, size_t length) __must_hold(&rbu_data.lock)
{
struct packet_data *newpacket;
int ordernum = 0;
@@ -292,7 +292,7 @@ static int packet_read_list(char *data, size_t * pread_length)
remaining_bytes = *pread_length;
bytes_read = rbu_data.packet_read_count;
- list_for_each_entry(newpacket, (&packet_data_head.list)->next, list) {
+ list_for_each_entry(newpacket, &packet_data_head.list, list) {
bytes_copied = do_packet_read(pdest, newpacket,
remaining_bytes, bytes_read, &temp_count);
remaining_bytes -= bytes_copied;
@@ -315,14 +315,14 @@ static void packet_empty_list(void)
{
struct packet_data *newpacket, *tmp;
- list_for_each_entry_safe(newpacket, tmp, (&packet_data_head.list)->next, list) {
+ list_for_each_entry_safe(newpacket, tmp, &packet_data_head.list, list) {
list_del(&newpacket->list);
/*
* zero out the RBU packet memory before freeing
* to make sure there are no stale RBU packets left in memory
*/
- memset(newpacket->data, 0, rbu_data.packetsize);
+ memset(newpacket->data, 0, newpacket->length);
set_memory_wb((unsigned long)newpacket->data,
1 << newpacket->ordernum);
free_pages((unsigned long) newpacket->data,
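The dell_rbu list fixes hinge on how the list iterators are meant to be called: list_for_each_entry() and list_for_each_entry_safe() take the list head itself and start from head->next internally. Passing (&packet_data_head.list)->next compiles (it is still a struct list_head *) but shifts the walk by one element, skipping the first packet and eventually treating the head as an entry; the memset now also zeroes each packet by its own recorded length rather than the global packetsize. The standalone demo below reimplements just enough of the macros to show the correct call; it is a simplified userspace model, not kernel code:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct packet { int ordernum; struct list_head list; };

int main(void)
{
	struct list_head head = { &head, &head };
	struct packet a = { 1 }, b = { 2 };
	struct packet *p;

	/* splice a and b onto the list by hand */
	head.next = &a.list; a.list.prev = &head;
	a.list.next = &b.list; b.list.prev = &a.list;
	b.list.next = &head; head.prev = &b.list;

	list_for_each_entry(p, &head, list)      /* correct: pass the head */
		printf("packet %d\n", p->ordernum);

	/* Passing head.next instead (as the old dell_rbu code did) would make
	 * the macro treat the first real entry as the terminator, skipping it
	 * and eventually dereferencing the head as if it were a packet. */
	return 0;
}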
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index ede483573fe0..b5e4da6a6779 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -15,6 +15,7 @@
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/i8042.h>
@@ -267,6 +268,20 @@ static void ideapad_shared_exit(struct ideapad_private *priv)
*/
#define IDEAPAD_EC_TIMEOUT 200 /* in ms */
+/*
+ * Some models (e.g., ThinkBook since 2024) have a low tolerance for being
+ * polled too frequently. Doing so may break the state machine in the EC,
+ * resulting in a hard shutdown.
+ *
+ * It is also observed that frequent polls may disturb the ongoing operation
+ * and notably delay the availability of EC response.
+ *
+ * These values are used as the delay before the first poll and the interval
+ * between subsequent polls to solve the above issues.
+ */
+#define IDEAPAD_EC_POLL_MIN_US 150
+#define IDEAPAD_EC_POLL_MAX_US 300
+
static int eval_int(acpi_handle handle, const char *name, unsigned long *res)
{
unsigned long long result;
@@ -383,7 +398,7 @@ static int read_ec_data(acpi_handle handle, unsigned long cmd, unsigned long *da
end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
while (time_before(jiffies, end_jiffies)) {
- schedule();
+ usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US);
err = eval_vpcr(handle, 1, &val);
if (err)
@@ -414,7 +429,7 @@ static int write_ec_cmd(acpi_handle handle, unsigned long cmd, unsigned long dat
end_jiffies = jiffies + msecs_to_jiffies(IDEAPAD_EC_TIMEOUT) + 1;
while (time_before(jiffies, end_jiffies)) {
- schedule();
+ usleep_range(IDEAPAD_EC_POLL_MIN_US, IDEAPAD_EC_POLL_MAX_US);
err = eval_vpcr(handle, 1, &val);
if (err)
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index e136d18b1d38..4a94a4ee031e 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -299,6 +299,13 @@ enum ppfear_regs {
#define PTL_PCD_PMC_MMIO_REG_LEN 0x31A8
/* SSRAM PMC Device ID */
+/* LNL */
+#define PMC_DEVID_LNL_SOCM 0xa87f
+
+/* PTL */
+#define PMC_DEVID_PTL_PCDH 0xe37f
+#define PMC_DEVID_PTL_PCDP 0xe47f
+
/* ARL */
#define PMC_DEVID_ARL_SOCM 0x777f
#define PMC_DEVID_ARL_SOCS 0xae7f
diff --git a/drivers/platform/x86/intel/pmc/ssram_telemetry.c b/drivers/platform/x86/intel/pmc/ssram_telemetry.c
index b207247eb5dd..93579152188e 100644
--- a/drivers/platform/x86/intel/pmc/ssram_telemetry.c
+++ b/drivers/platform/x86/intel/pmc/ssram_telemetry.c
@@ -187,6 +187,9 @@ static const struct pci_device_id intel_pmc_ssram_telemetry_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_MTL_SOCM) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_ARL_SOCS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_ARL_SOCM) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_LNL_SOCM) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_PTL_PCDH) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_PTL_PCDP) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_pmc_ssram_telemetry_pci_ids);
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
index 0c5c88eb7baf..9d8247bb9cfa 100644
--- a/drivers/platform/x86/intel/tpmi_power_domains.c
+++ b/drivers/platform/x86/intel/tpmi_power_domains.c
@@ -228,8 +228,10 @@ static int __init tpmi_init(void)
domain_die_map = kcalloc(size_mul(topology_max_packages(), MAX_POWER_DOMAINS),
sizeof(*domain_die_map), GFP_KERNEL);
- if (!domain_die_map)
+ if (!domain_die_map) {
+ ret = -ENOMEM;
goto free_domain_mask;
+ }
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"platform/x86/tpmi_power_domains:online",
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 0f8aea18275b..65897fae17df 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -58,7 +58,7 @@ static ssize_t show_agent_types(struct kobject *kobj, struct kobj_attribute *att
if (length)
length += sysfs_emit_at(buf, length, " ");
- length += sysfs_emit_at(buf, length, agent_name[agent]);
+ length += sysfs_emit_at(buf, length, "%s", agent_name[agent]);
}
length += sysfs_emit_at(buf, length, "\n");
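show_agent_types() passed agent_name[agent] directly as the format string, so any '%' in a name would be parsed as a conversion specifier; routing it through "%s" removes that risk. A minimal standalone illustration, with snprintf() standing in for sysfs_emit_at():

#include <stdio.h>

int main(void)
{
	char buf[64];
	const char *agent_name = "core%die";   /* hostile or unexpected name */

	/* wrong (what the old code did):  snprintf(buf, sizeof(buf), agent_name);
	 * undefined behaviour once "%d" tries to read a missing argument */

	snprintf(buf, sizeof(buf), "%s", agent_name);   /* fixed form */
	puts(buf);
	return 0;
}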
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index 1c7b2f2716ca..44d9948ed224 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -511,10 +511,13 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
/* Get the package ID from the TPMI core */
plat_info = tpmi_get_platform_data(auxdev);
- if (plat_info)
- pkg = plat_info->package_id;
- else
+ if (unlikely(!plat_info)) {
dev_info(&auxdev->dev, "Platform information is NULL\n");
+ ret = -ENODEV;
+ goto err_rem_common;
+ }
+
+ pkg = plat_info->package_id;
for (i = 0; i < num_resources; ++i) {
struct tpmi_uncore_power_domain_info *pd_info;
diff --git a/drivers/platform/x86/samsung-galaxybook.c b/drivers/platform/x86/samsung-galaxybook.c
index 5878a351993e..3c13e13d4885 100644
--- a/drivers/platform/x86/samsung-galaxybook.c
+++ b/drivers/platform/x86/samsung-galaxybook.c
@@ -1403,6 +1403,7 @@ static int galaxybook_probe(struct platform_device *pdev)
}
static const struct acpi_device_id galaxybook_device_ids[] = {
+ { "SAM0426" },
{ "SAM0427" },
{ "SAM0428" },
{ "SAM0429" },
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 35a5994bf64f..36f57d7b4a66 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -121,7 +121,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
struct ptp_clock_info *ops;
int err = -EOPNOTSUPP;
- if (ptp_clock_freerun(ptp)) {
+ if (tx->modes & (ADJ_SETOFFSET | ADJ_FREQUENCY | ADJ_OFFSET) &&
+ ptp_clock_freerun(ptp)) {
pr_err("ptp: physical clock is free running\n");
return -EBUSY;
}
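ptp_clock_adjtime() now refuses a free-running clock only for requests that would actually steer it (ADJ_SETOFFSET, ADJ_FREQUENCY, ADJ_OFFSET); plain queries keep working while virtual clocks are in use. A usage sketch from userspace — the device path is only an example, and FD_TO_CLOCKID follows the convention used by the kernel's testptp tool:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/timex.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timex tx = { .modes = 0 };       /* query only, no adjustment */
	int fd = open("/dev/ptp0", O_RDWR);     /* example device path */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
		perror("clock_adjtime");
	else
		printf("freq offset: %ld\n", tx.freq);

	close(fd);
	return 0;
}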
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 18934e28469e..a6aad743c282 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -100,10 +100,20 @@ static inline bool ptp_vclock_in_use(struct ptp_clock *ptp)
{
bool in_use = false;
+ /* Virtual clocks can't be stacked on top of virtual clocks.
+ * Avoid acquiring the n_vclocks_mux on virtual clocks, to allow this
+ * function to be called from code paths where the n_vclocks_mux of the
+ * parent physical clock is already held. Functionally that's not an
+ * issue, but lockdep would complain, because they have the same lock
+ * class.
+ */
+ if (ptp->is_virtual_clock)
+ return false;
+
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return true;
- if (!ptp->is_virtual_clock && ptp->n_vclocks)
+ if (ptp->n_vclocks)
in_use = true;
mutex_unlock(&ptp->n_vclocks_mux);
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index 97287e838ce1..66464674223f 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -783,6 +783,9 @@ static int riocm_ch_send(u16 ch_id, void *buf, int len)
if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
return -EINVAL;
+ if (len < sizeof(struct rio_ch_chan_hdr))
+ return -EINVAL; /* insufficient data from user */
+
ch = riocm_get_channel(ch_id);
if (!ch) {
riocm_error("%s(%d) ch_%d not found", current->comm,
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index bd9447dac596..c282236959b1 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -147,6 +147,7 @@ struct fan53555_device_info {
unsigned int slew_mask;
const unsigned int *ramp_delay_table;
unsigned int n_ramp_values;
+ unsigned int enable_time;
unsigned int slew_rate;
};
@@ -282,6 +283,7 @@ static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates);
+ di->enable_time = 250;
di->vsel_count = FAN53526_NVOLTAGES;
return 0;
@@ -296,10 +298,12 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
case FAN53555_CHIP_REV_00:
di->vsel_min = 600000;
di->vsel_step = 10000;
+ di->enable_time = 400;
break;
case FAN53555_CHIP_REV_13:
di->vsel_min = 800000;
di->vsel_step = 10000;
+ di->enable_time = 400;
break;
default:
dev_err(di->dev,
@@ -311,13 +315,19 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
case FAN53555_CHIP_ID_01:
case FAN53555_CHIP_ID_03:
case FAN53555_CHIP_ID_05:
+ di->vsel_min = 600000;
+ di->vsel_step = 10000;
+ di->enable_time = 400;
+ break;
case FAN53555_CHIP_ID_08:
di->vsel_min = 600000;
di->vsel_step = 10000;
+ di->enable_time = 175;
break;
case FAN53555_CHIP_ID_04:
di->vsel_min = 603000;
di->vsel_step = 12826;
+ di->enable_time = 400;
break;
default:
dev_err(di->dev,
@@ -350,6 +360,7 @@ static int fan53555_voltages_setup_rockchip(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates);
+ di->enable_time = 360;
di->vsel_count = FAN53555_NVOLTAGES;
return 0;
@@ -372,6 +383,7 @@ static int rk8602_voltages_setup_rockchip(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates);
+ di->enable_time = 360;
di->vsel_count = RK8602_NVOLTAGES;
return 0;
@@ -395,6 +407,7 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates);
+ di->enable_time = 400;
di->vsel_count = FAN53555_NVOLTAGES;
return 0;
@@ -594,6 +607,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->ramp_mask = di->slew_mask;
rdesc->ramp_delay_table = di->ramp_delay_table;
rdesc->n_ramp_values = di->n_ramp_values;
+ rdesc->enable_time = di->enable_time;
rdesc->owner = THIS_MODULE;
rdev = devm_regulator_register(di->dev, &di->desc, config);
diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
index b4fe76e33ff2..fcdd2d0317a5 100644
--- a/drivers/regulator/max20086-regulator.c
+++ b/drivers/regulator/max20086-regulator.c
@@ -5,6 +5,7 @@
// Copyright (C) 2022 Laurent Pinchart <laurent.pinchart@idesonboard.com>
// Copyright (C) 2018 Avnet, Inc.
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -133,11 +134,11 @@ static int max20086_regulators_register(struct max20086 *chip)
static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
{
struct of_regulator_match *matches;
- struct device_node *node;
unsigned int i;
int ret;
- node = of_get_child_by_name(chip->dev->of_node, "regulators");
+ struct device_node *node __free(device_node) =
+ of_get_child_by_name(chip->dev->of_node, "regulators");
if (!node) {
dev_err(chip->dev, "regulators node not found\n");
return -ENODEV;
@@ -153,7 +154,6 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
ret = of_regulator_match(chip->dev, node, matches,
chip->info->num_outputs);
- of_node_put(node);
if (ret < 0) {
dev_err(chip->dev, "Failed to match regulators\n");
return -EINVAL;
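The __free(device_node) annotation in max20086_parse_regulators_dt() drops the manual of_node_put(): the reference is released automatically when node goes out of scope, on every return path. The kernel's cleanup.h builds this on the compiler's cleanup attribute; the userspace analog below uses illustrative names, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

#define cleanup_with(fn) __attribute__((cleanup(fn)))

static void free_charp(char **p)
{
	free(*p);
	puts("buffer freed automatically");
}

int main(void)
{
	char *buf cleanup_with(free_charp) = malloc(32);

	if (!buf)
		return 1;
	return 0;   /* no explicit free(): the handler runs on every exit path */
}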
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 8172869bd3d7..0743c6acd6e2 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -692,8 +692,12 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
{
u8 irqstat;
u8 rtc_control;
+ unsigned long flags;
- spin_lock(&rtc_lock);
+ /* We cannot use spin_lock() here, as cmos_interrupt() is also called
+ * in a non-irq context.
+ */
+ spin_lock_irqsave(&rtc_lock, flags);
/* When the HPET interrupt handler calls us, the interrupt
* status is passed as arg1 instead of the irq number. But
@@ -727,7 +731,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
}
- spin_unlock(&rtc_lock);
+ spin_unlock_irqrestore(&rtc_lock, flags);
if (is_intr(irqstat)) {
rtc_update_irq(p, 1, irqstat);
@@ -1295,9 +1299,7 @@ static void cmos_check_wkalrm(struct device *dev)
* ACK the rtc irq here
*/
if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
- local_irq_disable();
cmos_interrupt(0, (void *)cmos->rtc);
- local_irq_enable();
return;
}
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 31c7dca8f469..2e1ac0c42e93 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -1538,7 +1538,12 @@ static int pcf2127_spi_probe(struct spi_device *spi)
variant = &pcf21xx_cfg[type];
}
- config.max_register = variant->max_register,
+ if (variant->type == PCF2131) {
+ config.read_flag_mask = 0x0;
+ config.write_flag_mask = 0x0;
+ }
+
+ config.max_register = variant->max_register;
regmap = devm_regmap_init_spi(spi, &config);
if (IS_ERR(regmap)) {
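The removed pcf2127 line ended in a comma rather than a semicolon, so it and the following regmap call formed one comma expression; that happened to behave identically, but the new PCF2131 if-block could not be inserted after it without a syntax error, hence the fix to a proper statement. A tiny standalone demonstration of the difference:

#include <stdio.h>

int main(void)
{
	int a, b;

	a = 1,          /* comma operator: this and the next line are ONE statement */
	b = 2;          /* still evaluated left to right, so it "works"... */

	printf("%d %d\n", a, b);   /* prints: 1 2 */

	/* ...but an if-block could not legally be inserted between the two
	 * lines above, which is exactly what the pcf2127 change needed to do */
	return 0;
}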
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index db5c9b641277..a7220b4d0e8d 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
@@ -53,6 +54,7 @@ enum {
* Device | Write time | Read time | Write alarm
* =================================================
* S5M8767 | UDR + TIME | | UDR
+ * S2MPG10 | WUDR | RUDR | AUDR
* S2MPS11/14 | WUDR | RUDR | WUDR + RUDR
* S2MPS13 | WUDR | RUDR | WUDR + AUDR
* S2MPS15 | WUDR | RUDR | AUDR
@@ -99,6 +101,20 @@ static const struct s5m_rtc_reg_config s5m_rtc_regs = {
.write_alarm_udr_mask = S5M_RTC_UDR_MASK,
};
+/* Register map for S2MPG10 */
+static const struct s5m_rtc_reg_config s2mpg10_rtc_regs = {
+ .regs_count = 7,
+ .time = S2MPG10_RTC_SEC,
+ .ctrl = S2MPG10_RTC_CTRL,
+ .alarm0 = S2MPG10_RTC_A0SEC,
+ .alarm1 = S2MPG10_RTC_A1SEC,
+ .udr_update = S2MPG10_RTC_UPDATE,
+ .autoclear_udr_mask = S2MPS15_RTC_WUDR_MASK | S2MPS15_RTC_AUDR_MASK,
+ .read_time_udr_mask = S2MPS_RTC_RUDR_MASK,
+ .write_time_udr_mask = S2MPS15_RTC_WUDR_MASK,
+ .write_alarm_udr_mask = S2MPS15_RTC_AUDR_MASK,
+};
+
/* Register map for S2MPS13 */
static const struct s5m_rtc_reg_config s2mps13_rtc_regs = {
.regs_count = 7,
@@ -227,8 +243,8 @@ static int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
return ret;
}
-static int s5m_check_peding_alarm_interrupt(struct s5m_rtc_info *info,
- struct rtc_wkalrm *alarm)
+static int s5m_check_pending_alarm_interrupt(struct s5m_rtc_info *info,
+ struct rtc_wkalrm *alarm)
{
int ret;
unsigned int val;
@@ -238,6 +254,7 @@ static int s5m_check_peding_alarm_interrupt(struct s5m_rtc_info *info,
ret = regmap_read(info->regmap, S5M_RTC_STATUS, &val);
val &= S5M_ALARM0_STATUS;
break;
+ case S2MPG10:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
@@ -262,17 +279,9 @@ static int s5m_check_peding_alarm_interrupt(struct s5m_rtc_info *info,
static int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
{
int ret;
- unsigned int data;
- ret = regmap_read(info->regmap, info->regs->udr_update, &data);
- if (ret < 0) {
- dev_err(info->dev, "failed to read update reg(%d)\n", ret);
- return ret;
- }
-
- data |= info->regs->write_time_udr_mask;
-
- ret = regmap_write(info->regmap, info->regs->udr_update, data);
+ ret = regmap_set_bits(info->regmap, info->regs->udr_update,
+ info->regs->write_time_udr_mask);
if (ret < 0) {
dev_err(info->dev, "failed to write update reg(%d)\n", ret);
return ret;
@@ -286,20 +295,14 @@ static int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
static int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
{
int ret;
- unsigned int data;
+ unsigned int udr_mask;
- ret = regmap_read(info->regmap, info->regs->udr_update, &data);
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to read update reg(%d)\n",
- __func__, ret);
- return ret;
- }
-
- data |= info->regs->write_alarm_udr_mask;
+ udr_mask = info->regs->write_alarm_udr_mask;
switch (info->device_type) {
case S5M8767X:
- data &= ~S5M_RTC_TIME_EN_MASK;
+ udr_mask |= S5M_RTC_TIME_EN_MASK;
break;
+ case S2MPG10:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
@@ -309,7 +312,8 @@ static int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
return -EINVAL;
}
- ret = regmap_write(info->regmap, info->regs->udr_update, data);
+ ret = regmap_update_bits(info->regmap, info->regs->udr_update,
+ udr_mask, info->regs->write_alarm_udr_mask);
if (ret < 0) {
dev_err(info->dev, "%s: fail to write update reg(%d)\n",
__func__, ret);
@@ -320,8 +324,8 @@ static int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
/* On S2MPS13 the AUDR is not auto-cleared */
if (info->device_type == S2MPS13X)
- regmap_update_bits(info->regmap, info->regs->udr_update,
- S2MPS13_RTC_AUDR_MASK, 0);
+ regmap_clear_bits(info->regmap, info->regs->udr_update,
+ S2MPS13_RTC_AUDR_MASK);
return ret;
}
@@ -333,10 +337,8 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
int ret;
if (info->regs->read_time_udr_mask) {
- ret = regmap_update_bits(info->regmap,
- info->regs->udr_update,
- info->regs->read_time_udr_mask,
- info->regs->read_time_udr_mask);
+ ret = regmap_set_bits(info->regmap, info->regs->udr_update,
+ info->regs->read_time_udr_mask);
if (ret) {
dev_err(dev,
"Failed to prepare registers for time reading: %d\n",
@@ -351,6 +353,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
switch (info->device_type) {
case S5M8767X:
+ case S2MPG10:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
@@ -374,6 +377,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
switch (info->device_type) {
case S5M8767X:
+ case S2MPG10:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
@@ -411,6 +415,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
switch (info->device_type) {
case S5M8767X:
+ case S2MPG10:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
@@ -430,7 +435,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
dev_dbg(dev, "%s: %ptR(%d)\n", __func__, &alrm->time, alrm->time.tm_wday);
- return s5m_check_peding_alarm_interrupt(info, alrm);
+ return s5m_check_pending_alarm_interrupt(info, alrm);
}
static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
@@ -449,6 +454,7 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
switch (info->device_type) {
case S5M8767X:
+ case S2MPG10:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
@@ -487,6 +493,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
switch (info->device_type) {
case S5M8767X:
+ case S2MPG10:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
@@ -524,6 +531,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
switch (info->device_type) {
case S5M8767X:
+ case S2MPG10:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
@@ -604,6 +612,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
ret = regmap_raw_write(info->regmap, S5M_ALARM0_CONF, data, 2);
break;
+ case S2MPG10:
case S2MPS15X:
case S2MPS14X:
case S2MPS13X:
@@ -634,59 +643,92 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
return ret;
}
+static int s5m_rtc_restart_s2mpg10(struct sys_off_data *data)
+{
+ struct s5m_rtc_info *info = data->cb_data;
+ int ret;
+
+ if (data->mode != REBOOT_COLD && data->mode != REBOOT_HARD)
+ return NOTIFY_DONE;
+
+ /*
+ * Arm watchdog with maximum timeout (2 seconds), and perform full reset
+ * on expiry.
+ */
+ ret = regmap_set_bits(info->regmap, S2MPG10_RTC_WTSR,
+ (S2MPG10_WTSR_COLDTIMER | S2MPG10_WTSR_COLDRST
+ | S2MPG10_WTSR_WTSRT | S2MPG10_WTSR_WTSR_EN));
+
+ return ret ? NOTIFY_BAD : NOTIFY_DONE;
+}
+
static int s5m_rtc_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *s5m87xx = dev_get_drvdata(pdev->dev.parent);
+ enum sec_device_type device_type =
+ platform_get_device_id(pdev)->driver_data;
struct s5m_rtc_info *info;
- struct i2c_client *i2c;
- const struct regmap_config *regmap_cfg;
int ret, alarm_irq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- switch (platform_get_device_id(pdev)->driver_data) {
- case S2MPS15X:
- regmap_cfg = &s2mps14_rtc_regmap_config;
- info->regs = &s2mps15_rtc_regs;
- alarm_irq = S2MPS14_IRQ_RTCA0;
- break;
- case S2MPS14X:
- regmap_cfg = &s2mps14_rtc_regmap_config;
- info->regs = &s2mps14_rtc_regs;
- alarm_irq = S2MPS14_IRQ_RTCA0;
- break;
- case S2MPS13X:
- regmap_cfg = &s2mps14_rtc_regmap_config;
- info->regs = &s2mps13_rtc_regs;
- alarm_irq = S2MPS14_IRQ_RTCA0;
- break;
- case S5M8767X:
- regmap_cfg = &s5m_rtc_regmap_config;
- info->regs = &s5m_rtc_regs;
- alarm_irq = S5M8767_IRQ_RTCA1;
- break;
- default:
- return dev_err_probe(&pdev->dev, -ENODEV,
- "Device type %lu is not supported by RTC driver\n",
- platform_get_device_id(pdev)->driver_data);
- }
+ info->regmap = dev_get_regmap(pdev->dev.parent, "rtc");
+ if (!info->regmap) {
+ const struct regmap_config *regmap_cfg;
+ struct i2c_client *i2c;
- i2c = devm_i2c_new_dummy_device(&pdev->dev, s5m87xx->i2c->adapter,
- RTC_I2C_ADDR);
- if (IS_ERR(i2c))
- return dev_err_probe(&pdev->dev, PTR_ERR(i2c),
- "Failed to allocate I2C for RTC\n");
+ switch (device_type) {
+ case S2MPS15X:
+ regmap_cfg = &s2mps14_rtc_regmap_config;
+ info->regs = &s2mps15_rtc_regs;
+ alarm_irq = S2MPS14_IRQ_RTCA0;
+ break;
+ case S2MPS14X:
+ regmap_cfg = &s2mps14_rtc_regmap_config;
+ info->regs = &s2mps14_rtc_regs;
+ alarm_irq = S2MPS14_IRQ_RTCA0;
+ break;
+ case S2MPS13X:
+ regmap_cfg = &s2mps14_rtc_regmap_config;
+ info->regs = &s2mps13_rtc_regs;
+ alarm_irq = S2MPS14_IRQ_RTCA0;
+ break;
+ case S5M8767X:
+ regmap_cfg = &s5m_rtc_regmap_config;
+ info->regs = &s5m_rtc_regs;
+ alarm_irq = S5M8767_IRQ_RTCA1;
+ break;
+ default:
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Unsupported device type %d\n",
+ device_type);
+ }
- info->regmap = devm_regmap_init_i2c(i2c, regmap_cfg);
- if (IS_ERR(info->regmap))
- return dev_err_probe(&pdev->dev, PTR_ERR(info->regmap),
- "Failed to allocate RTC register map\n");
+ i2c = devm_i2c_new_dummy_device(&pdev->dev,
+ s5m87xx->i2c->adapter,
+ RTC_I2C_ADDR);
+ if (IS_ERR(i2c))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c),
+ "Failed to allocate I2C\n");
+
+ info->regmap = devm_regmap_init_i2c(i2c, regmap_cfg);
+ if (IS_ERR(info->regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->regmap),
+ "Failed to allocate regmap\n");
+ } else if (device_type == S2MPG10) {
+ info->regs = &s2mpg10_rtc_regs;
+ alarm_irq = S2MPG10_IRQ_RTCA0;
+ } else {
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Unsupported device type %d\n",
+ device_type);
+ }
info->dev = &pdev->dev;
info->s5m87xx = s5m87xx;
- info->device_type = platform_get_device_id(pdev)->driver_data;
+ info->device_type = device_type;
if (s5m87xx->irq_data) {
info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq);
@@ -721,7 +763,23 @@ static int s5m_rtc_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, ret,
"Failed to request alarm IRQ %d\n",
info->irq);
- device_init_wakeup(&pdev->dev, true);
+
+ ret = devm_device_init_wakeup(&pdev->dev);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to init wakeup\n");
+ }
+
+ if (of_device_is_system_power_controller(pdev->dev.parent->of_node) &&
+ info->device_type == S2MPG10) {
+ ret = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_RESTART,
+ SYS_OFF_PRIO_HIGH + 1,
+ s5m_rtc_restart_s2mpg10,
+ info);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register restart handler\n");
}
return devm_rtc_register_device(info->rtc_dev);
@@ -755,6 +813,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
static const struct platform_device_id s5m_rtc_id[] = {
{ "s5m-rtc", S5M8767X },
+ { "s2mpg10-rtc", S2MPG10 },
{ "s2mps13-rtc", S2MPS13X },
{ "s2mps14-rtc", S2MPS14X },
{ "s2mps15-rtc", S2MPS15X },
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index cef60770f68b..b3fcdcae379e 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -86,7 +86,7 @@ static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
if (!uapqns || nr_apqns == 0)
return NULL;
- return memdup_user(uapqns, nr_apqns * sizeof(struct pkey_apqn));
+ return memdup_array_user(uapqns, nr_apqns, sizeof(struct pkey_apqn));
}
static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs)
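The pkey change above swaps an open-coded nr * size multiplication for memdup_array_user(), which rejects multiplication overflow before copying from userspace. A minimal sketch of the calling convention, with hypothetical names (not the pkey code):

#include <linux/err.h>
#include <linux/string.h>

/* Hypothetical: duplicate an array of fixed-size records passed via ioctl. */
static void *example_copy_records(void __user *uptr, size_t nr, size_t size)
{
	if (!uptr || nr == 0)
		return NULL;

	/*
	 * Returns ERR_PTR(-EOVERFLOW) if nr * size would overflow,
	 * ERR_PTR(-EFAULT/-ENOMEM) on copy or allocation failure.
	 */
	return memdup_array_user(uptr, nr, size);
}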
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 41e36af35488..90a84ae98b97 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -449,6 +449,8 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
+ flush_work(&port->rport_work);
+
retval = zfcp_unit_add(port, fcp_lun);
if (retval)
return retval;
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
index 5a5525054d71..5b079b8b7a08 100644
--- a/drivers/scsi/elx/efct/efct_hw.c
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -1120,7 +1120,7 @@ int
efct_hw_parse_filter(struct efct_hw *hw, void *value)
{
int rc = 0;
- char *p = NULL;
+ char *p = NULL, *pp = NULL;
char *token;
u32 idx = 0;
@@ -1132,6 +1132,7 @@ efct_hw_parse_filter(struct efct_hw *hw, void *value)
efc_log_err(hw->os, "p is NULL\n");
return -ENOMEM;
}
+ pp = p;
idx = 0;
while ((token = strsep(&p, ",")) && *token) {
@@ -1144,7 +1145,7 @@ efct_hw_parse_filter(struct efct_hw *hw, void *value)
if (idx == ARRAY_SIZE(hw->config.filter_def))
break;
}
- kfree(p);
+ kfree(pp);
return rc;
}
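The efct fix above is the classic strsep() pitfall: strsep() advances the pointer it is handed, so kfree() must be called on a saved copy of the original allocation, not on the cursor. A minimal standalone sketch of the safe pattern (hypothetical token counter, not the driver's code):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical: count comma-separated tokens in an input string. */
static int example_count_tokens(const char *value)
{
	char *buf, *cursor, *token;
	int count = 0;

	buf = kstrdup(value, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cursor = buf;			/* strsep() advances this copy */
	while ((token = strsep(&cursor, ",")) && *token)
		count++;

	kfree(buf);			/* free the original pointer, not cursor */
	return count;
}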
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
index f8ab69c51dab..ae37f85f618b 100644
--- a/drivers/scsi/fnic/fdls_disc.c
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -763,50 +763,86 @@ static void fdls_send_fabric_abts(struct fnic_iport_s *iport)
iport->fabric.timer_pending = 1;
}
-static void fdls_send_fdmi_abts(struct fnic_iport_s *iport)
+static uint8_t *fdls_alloc_init_fdmi_abts_frame(struct fnic_iport_s *iport,
+ uint16_t oxid)
{
- uint8_t *frame;
+ struct fc_frame_header *pfdmi_abts;
uint8_t d_id[3];
+ uint8_t *frame;
struct fnic *fnic = iport->fnic;
- struct fc_frame_header *pfabric_abts;
- unsigned long fdmi_tov;
- uint16_t oxid;
- uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
- sizeof(struct fc_frame_header);
frame = fdls_alloc_frame(iport);
if (frame == NULL) {
FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
"Failed to allocate frame to send FDMI ABTS");
- return;
+ return NULL;
}
- pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
+ pfdmi_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET);
fdls_init_fabric_abts_frame(frame, iport);
hton24(d_id, FC_FID_MGMT_SERV);
- FNIC_STD_SET_D_ID(*pfabric_abts, d_id);
+ FNIC_STD_SET_D_ID(*pfdmi_abts, d_id);
+ FNIC_STD_SET_OX_ID(*pfdmi_abts, oxid);
+
+ return frame;
+}
+
+static void fdls_send_fdmi_abts(struct fnic_iport_s *iport)
+{
+ uint8_t *frame;
+ struct fnic *fnic = iport->fnic;
+ unsigned long fdmi_tov;
+ uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET +
+ sizeof(struct fc_frame_header);
if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) {
- oxid = iport->active_oxid_fdmi_plogi;
- FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+ frame = fdls_alloc_init_fdmi_abts_frame(iport,
+ iport->active_oxid_fdmi_plogi);
+ if (frame == NULL)
+ return;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI PLOGI abts. iport->fabric.state: %d oxid: 0x%x",
+ iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_plogi);
fnic_send_fcoe_frame(iport, frame, frame_size);
} else {
if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) {
- oxid = iport->active_oxid_fdmi_rhba;
- FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+ frame = fdls_alloc_init_fdmi_abts_frame(iport,
+ iport->active_oxid_fdmi_rhba);
+ if (frame == NULL)
+ return;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI RHBA abts. iport->fabric.state: %d oxid: 0x%x",
+ iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_rhba);
fnic_send_fcoe_frame(iport, frame, frame_size);
}
if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) {
- oxid = iport->active_oxid_fdmi_rpa;
- FNIC_STD_SET_OX_ID(*pfabric_abts, oxid);
+ frame = fdls_alloc_init_fdmi_abts_frame(iport,
+ iport->active_oxid_fdmi_rpa);
+ if (frame == NULL) {
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING)
+ goto arm_timer;
+ else
+ return;
+ }
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send FDMI RPA abts. iport->fabric.state: %d oxid: 0x%x",
+ iport->fcid, iport->fabric.state, iport->active_oxid_fdmi_rpa);
fnic_send_fcoe_frame(iport, frame, frame_size);
}
}
+arm_timer:
fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov);
mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov));
iport->fabric.fdmi_pending |= FDLS_FDMI_ABORT_PENDING;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: iport->fabric.fdmi_pending: 0x%x",
+ iport->fcid, iport->fabric.fdmi_pending);
}
static void fdls_send_fabric_flogi(struct fnic_iport_s *iport)
@@ -2245,6 +2281,21 @@ void fdls_fabric_timer_callback(struct timer_list *t)
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
+void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport)
+{
+ struct fnic *fnic = iport->fnic;
+
+ iport->fabric.fdmi_pending = 0;
+ /* If max retries not exhausted, start over from fdmi plogi */
+ if (iport->fabric.fdmi_retry < FDLS_FDMI_MAX_RETRY) {
+ iport->fabric.fdmi_retry++;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Retry FDMI PLOGI. FDMI retry: %d",
+ iport->fabric.fdmi_retry);
+ fdls_send_fdmi_plogi(iport);
+ }
+}
+
void fdls_fdmi_timer_callback(struct timer_list *t)
{
struct fnic_fdls_fabric_s *fabric = timer_container_of(fabric, t,
@@ -2257,7 +2308,7 @@ void fdls_fdmi_timer_callback(struct timer_list *t)
spin_lock_irqsave(&fnic->fnic_lock, flags);
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+ "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending);
if (!iport->fabric.fdmi_pending) {
/* timer expired after fdmi responses received. */
@@ -2265,7 +2316,7 @@ void fdls_fdmi_timer_callback(struct timer_list *t)
return;
}
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+ "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending);
/* if not abort pending, send an abort */
if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) {
@@ -2274,33 +2325,37 @@ void fdls_fdmi_timer_callback(struct timer_list *t)
return;
}
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+ "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending);
/* ABTS pending for an active fdmi request that is pending.
* That means FDMI ABTS timed out
* Schedule to free the OXID after 2*r_a_tov and proceed
*/
if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDMI PLOGI ABTS timed out. Schedule oxid free: 0x%x\n",
+ iport->active_oxid_fdmi_plogi);
fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_plogi);
} else {
- if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING)
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDMI RHBA ABTS timed out. Schedule oxid free: 0x%x\n",
+ iport->active_oxid_fdmi_rhba);
fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rhba);
- if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING)
+ }
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDMI RPA ABTS timed out. Schedule oxid free: 0x%x\n",
+ iport->active_oxid_fdmi_rpa);
fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rpa);
+ }
}
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+ "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending);
- iport->fabric.fdmi_pending = 0;
- /* If max retries not exhaused, start over from fdmi plogi */
- if (iport->fabric.fdmi_retry < FDLS_FDMI_MAX_RETRY) {
- iport->fabric.fdmi_retry++;
- FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "retry fdmi timer %d", iport->fabric.fdmi_retry);
- fdls_send_fdmi_plogi(iport);
- }
+ fdls_fdmi_retry_plogi(iport);
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+ "iport->fabric.fdmi_pending: 0x%x\n", iport->fabric.fdmi_pending);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
@@ -3715,13 +3770,60 @@ static void fdls_process_fdmi_abts_rsp(struct fnic_iport_s *iport,
switch (FNIC_FRAME_TYPE(oxid)) {
case FNIC_FRAME_TYPE_FDMI_PLOGI:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received FDMI PLOGI ABTS rsp with oxid: 0x%x", oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: iport->fabric.fdmi_pending: 0x%x",
+ iport->fcid, iport->fabric.fdmi_pending);
fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi);
+
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_PLOGI_PENDING;
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: iport->fabric.fdmi_pending: 0x%x",
+ iport->fcid, iport->fabric.fdmi_pending);
break;
case FNIC_FRAME_TYPE_FDMI_RHBA:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received FDMI RHBA ABTS rsp with oxid: 0x%x", oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: iport->fabric.fdmi_pending: 0x%x",
+ iport->fcid, iport->fabric.fdmi_pending);
+
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_REG_HBA_PENDING;
+
+ /* If RPA is still pending, don't turn off ABORT PENDING.
+ * We count on the timer to detect the ABTS timeout and take
+ * corrective action.
+ */
+ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING))
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING;
+
fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: iport->fabric.fdmi_pending: 0x%x",
+ iport->fcid, iport->fabric.fdmi_pending);
break;
case FNIC_FRAME_TYPE_FDMI_RPA:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Received FDMI RPA ABTS rsp with oxid: 0x%x", oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: iport->fabric.fdmi_pending: 0x%x",
+ iport->fcid, iport->fabric.fdmi_pending);
+
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_RPA_PENDING;
+
+ /* If RHBA is still pending, don't turn off ABORT PENDING.
+ * We count on the timer to detect the ABTS timeout and take
+ * corrective action.
+ */
+ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING))
+ iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING;
+
fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: iport->fabric.fdmi_pending: 0x%x",
+ iport->fcid, iport->fabric.fdmi_pending);
break;
default:
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
@@ -3730,10 +3832,16 @@ static void fdls_process_fdmi_abts_rsp(struct fnic_iport_s *iport,
break;
}
- timer_delete_sync(&iport->fabric.fdmi_timer);
- iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING;
-
- fdls_send_fdmi_plogi(iport);
+ /*
+ * Only if ABORT PENDING is off, delete the timer, and if no other
+ * operations are pending, retry FDMI.
+ * Otherwise, let the timer pop and take the appropriate action.
+ */
+ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) {
+ timer_delete_sync(&iport->fabric.fdmi_timer);
+ if (!iport->fabric.fdmi_pending)
+ fdls_fdmi_retry_plogi(iport);
+ }
}
static void
@@ -4972,9 +5080,12 @@ void fnic_fdls_link_down(struct fnic_iport_s *iport)
fdls_delete_tport(iport, tport);
}
- if ((fnic_fdmi_support == 1) && (iport->fabric.fdmi_pending > 0)) {
- timer_delete_sync(&iport->fabric.fdmi_timer);
- iport->fabric.fdmi_pending = 0;
+ if (fnic_fdmi_support == 1) {
+ if (iport->fabric.fdmi_pending > 0) {
+ timer_delete_sync(&iport->fabric.fdmi_timer);
+ iport->fabric.fdmi_pending = 0;
+ }
+ iport->flags &= ~FNIC_FDMI_ACTIVE;
}
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 6c5f6046b1f5..c2fdc6553e62 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -30,7 +30,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.8.0.0"
+#define DRV_VERSION "1.8.0.2"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 1e8cd64f9a5c..103ab6f1f7cd 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -636,6 +636,8 @@ static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len)
unsigned long flags;
pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa))
+ return -ENOMEM;
if ((fnic_fc_trace_set_data(fnic->fnic_num,
FNIC_FC_SEND | 0x80, (char *) frame,
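The fnic_send_frame() change adds the mandatory dma_mapping_error() check after dma_map_single(): a streaming mapping can fail (for example under an IOMMU or when swiotlb is exhausted), and the returned handle must not be used in that case. A minimal sketch of the pattern with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical: map a buffer for device TX and bail out cleanly on failure. */
static int example_map_for_tx(struct device *dev, void *buf, size_t len,
			      dma_addr_t *dma)
{
	*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;	/* do not use *dma from here on */

	/* caller later: dma_unmap_single(dev, *dma, len, DMA_TO_DEVICE); */
	return 0;
}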
diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h
index 8e610b65ad57..531d0b37e450 100644
--- a/drivers/scsi/fnic/fnic_fdls.h
+++ b/drivers/scsi/fnic/fnic_fdls.h
@@ -394,6 +394,7 @@ void fdls_send_tport_abts(struct fnic_iport_s *iport,
bool fdls_delete_tport(struct fnic_iport_s *iport,
struct fnic_tport_s *tport);
void fdls_fdmi_timer_callback(struct timer_list *t);
+void fdls_fdmi_retry_plogi(struct fnic_iport_s *iport);
/* fnic_fcs.c */
void fnic_fdls_init(struct fnic *fnic, int usefip);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 7133b254cbe4..75b29a018d1f 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1046,7 +1046,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind
if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
- FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
"xfer_len: %llu", xfer_len);
break;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 3aac0e17cb00..9179f8aee964 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5910,7 +5910,11 @@ megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
const struct cpumask *mask;
if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
- mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
+ int nid = dev_to_node(&instance->pdev->dev);
+
+ if (nid == NUMA_NO_NODE)
+ nid = 0;
+ mask = cpumask_of_node(nid);
for (i = 0; i < instance->low_latency_index_start; i++) {
irq = pci_irq_vector(instance->pdev, i);
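The megaraid_sas hunk guards cpumask_of_node() against NUMA_NO_NODE, which dev_to_node() returns on non-NUMA systems or when firmware reports no proximity domain, and which is not a valid node index. A minimal sketch of the same fallback-to-node-0 policy, with hypothetical names:

#include <linux/device.h>
#include <linux/numa.h>
#include <linux/topology.h>

/* Hypothetical: pick the CPU mask local to a device, tolerating no NUMA info. */
static const struct cpumask *example_local_cpus(struct device *dev)
{
	int nid = dev_to_node(dev);

	if (nid == NUMA_NO_NODE)
		nid = 0;	/* same fallback as the fix above */

	return cpumask_of_node(nid);
}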
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index 8ef174cd4d37..3e4124177b2a 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -215,7 +215,7 @@ enum hw_register_bits {
/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
- PHYEV_DCDR_ERR = (1U << 23), /* STP Deocder Error */
+ PHYEV_DCDR_ERR = (1U << 23), /* STP Decoder Error */
PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
PHYEV_AN = (1U << 18), /* SATA async notification */
@@ -347,7 +347,7 @@ enum sas_cmd_port_registers {
CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
- CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */
+ CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 376b8897ab90..746ff6a1f309 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -665,7 +665,8 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
* if the device is in the process of becoming ready, we
* should retry.
*/
- if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
+ if ((sshdr.asc == 0x04) &&
+ (sshdr.ascq == 0x01 || sshdr.ascq == 0x0a))
return NEEDS_RETRY;
/*
* if the device is not started, we need to wake
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0b8c91bf793f..c75a806496d6 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3499,7 +3499,7 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.new_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_new_fnode;
}
index = transport->new_flashnode(shost, data, len);
@@ -3509,7 +3509,6 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport,
else
err = -EIO;
-put_host:
scsi_host_put(shost);
exit_new_fnode:
@@ -3534,7 +3533,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.del_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_del_fnode;
}
idx = ev->u.del_flashnode.flashnode_idx;
@@ -3576,7 +3575,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.login_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_login_fnode;
}
idx = ev->u.login_flashnode.flashnode_idx;
@@ -3628,7 +3627,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.logout_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_logout_fnode;
}
idx = ev->u.logout_flashnode.flashnode_idx;
@@ -3678,7 +3677,7 @@ static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.logout_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_logout_sid;
}
session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 2e6b2412d2c9..d9e59204a9c3 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -362,7 +362,7 @@ MODULE_PARM_DESC(ring_avail_percent_lowater,
/*
* Timeout in seconds for all devices managed by this driver.
*/
-static int storvsc_timeout = 180;
+static const int storvsc_timeout = 180;
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
static struct scsi_transport_template *fc_transport_template;
@@ -768,7 +768,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
return;
}
- t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0) {
dev_err(dev, "Failed to create sub-channel: timed out\n");
return;
@@ -833,7 +833,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
if (ret != 0)
return ret;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0)
return -ETIMEDOUT;
@@ -1350,6 +1350,8 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
return ret;
ret = storvsc_channel_init(device, is_fc);
+ if (ret)
+ vmbus_close(device->channel);
return ret;
}
@@ -1668,7 +1670,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
if (ret != 0)
return FAILED;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0)
return TIMEOUT_ERROR;
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index fe0f122f07b0..aa1932ba17cb 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1958,10 +1958,10 @@ static int cqspi_probe(struct platform_device *pdev)
goto probe_setup_failed;
}
- ret = devm_pm_runtime_enable(dev);
- if (ret) {
- if (cqspi->rx_chan)
- dma_release_channel(cqspi->rx_chan);
+ pm_runtime_enable(dev);
+
+ if (cqspi->rx_chan) {
+ dma_release_channel(cqspi->rx_chan);
goto probe_setup_failed;
}
@@ -1981,6 +1981,7 @@ static int cqspi_probe(struct platform_device *pdev)
return 0;
probe_setup_failed:
cqspi_controller_enable(cqspi, 0);
+ pm_runtime_disable(dev);
probe_reset_failed:
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
@@ -1999,7 +2000,8 @@ static void cqspi_remove(struct platform_device *pdev)
if (cqspi->rx_chan)
dma_release_channel(cqspi->rx_chan);
- clk_disable_unprepare(cqspi->clk);
+ if (pm_runtime_get_sync(&pdev->dev) >= 0)
+ clk_disable(cqspi->clk);
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
diff --git a/drivers/spi/spi-loongson-core.c b/drivers/spi/spi-loongson-core.c
index 4fec226456d1..b46f072a0387 100644
--- a/drivers/spi/spi-loongson-core.c
+++ b/drivers/spi/spi-loongson-core.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c
index e674097bf3be..d336f4d228d5 100644
--- a/drivers/spi/spi-offload.c
+++ b/drivers/spi/spi-offload.c
@@ -297,7 +297,7 @@ int spi_offload_trigger_enable(struct spi_offload *offload,
if (trigger->ops->enable) {
ret = trigger->ops->enable(trigger, config);
if (ret) {
- if (offload->ops->trigger_disable)
+ if (offload->ops && offload->ops->trigger_disable)
offload->ops->trigger_disable(offload);
return ret;
}
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 29c616e2c408..70bb74b3bd9c 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -134,6 +134,7 @@ struct omap2_mcspi {
size_t max_xfer_len;
u32 ref_clk_hz;
bool use_multi_mode;
+ bool last_msg_kept_cs;
};
struct omap2_mcspi_cs {
@@ -1269,6 +1270,10 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
* multi-mode is applicable.
*/
mcspi->use_multi_mode = true;
+
+ if (mcspi->last_msg_kept_cs)
+ mcspi->use_multi_mode = false;
+
list_for_each_entry(tr, &msg->transfers, transfer_list) {
if (!tr->bits_per_word)
bits_per_word = msg->spi->bits_per_word;
@@ -1287,18 +1292,19 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
mcspi->use_multi_mode = false;
}
- /* Check if transfer asks to change the CS status after the transfer */
- if (!tr->cs_change)
- mcspi->use_multi_mode = false;
-
- /*
- * If at least one message is not compatible, switch back to single mode
- *
- * The bits_per_word of certain transfer can be different, but it will have no
- * impact on the signal itself.
- */
- if (!mcspi->use_multi_mode)
- break;
+ if (list_is_last(&tr->transfer_list, &msg->transfers)) {
+ /* Check if transfer asks to keep the CS status after the whole message */
+ if (tr->cs_change) {
+ mcspi->use_multi_mode = false;
+ mcspi->last_msg_kept_cs = true;
+ } else {
+ mcspi->last_msg_kept_cs = false;
+ }
+ } else {
+ /* Check if transfer asks to change the CS status after the transfer */
+ if (!tr->cs_change)
+ mcspi->use_multi_mode = false;
+ }
}
omap2_mcspi_set_mode(ctlr);
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index c6489e90b8b9..e27642c4dea4 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -762,10 +762,10 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
return -EINVAL;
num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt,
- PCI_IRQ_ALL_TYPES);
+ PCI_IRQ_INTX | PCI_IRQ_MSI);
if (num_vector < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors\n");
- return ret;
+ return num_vector;
}
init_completion(&spi_sub_ptr->spi_xfer_done);
diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c
index 7c1fa55fbc47..4ab7e86f4bd5 100644
--- a/drivers/spi/spi-stm32-ospi.c
+++ b/drivers/spi/spi-stm32-ospi.c
@@ -804,7 +804,7 @@ static int stm32_ospi_get_resources(struct platform_device *pdev)
return ret;
}
- ospi->rstc = devm_reset_control_array_get_exclusive(dev);
+ ospi->rstc = devm_reset_control_array_get_exclusive_released(dev);
if (IS_ERR(ospi->rstc))
return dev_err_probe(dev, PTR_ERR(ospi->rstc),
"Can't get reset\n");
@@ -936,12 +936,16 @@ static int stm32_ospi_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_enable;
- if (ospi->rstc) {
- reset_control_assert(ospi->rstc);
- udelay(2);
- reset_control_deassert(ospi->rstc);
+ ret = reset_control_acquire(ospi->rstc);
+ if (ret) {
+ dev_err_probe(dev, ret, "Can not acquire reset %d\n", ret);
+ goto err_pm_resume;
}
+ reset_control_assert(ospi->rstc);
+ udelay(2);
+ reset_control_deassert(ospi->rstc);
+
ret = spi_register_controller(ctrl);
if (ret) {
/* Disable ospi */
@@ -987,6 +991,8 @@ static void stm32_ospi_remove(struct platform_device *pdev)
if (ospi->dma_chrx)
dma_release_channel(ospi->dma_chrx);
+ reset_control_release(ospi->rstc);
+
pm_runtime_put_sync_suspend(ospi->dev);
pm_runtime_force_suspend(ospi->dev);
}
@@ -997,6 +1003,8 @@ static int __maybe_unused stm32_ospi_suspend(struct device *dev)
pinctrl_pm_select_sleep_state(dev);
+ reset_control_release(ospi->rstc);
+
return pm_runtime_force_suspend(ospi->dev);
}
@@ -1016,6 +1024,12 @@ static int __maybe_unused stm32_ospi_resume(struct device *dev)
if (ret < 0)
return ret;
+ ret = reset_control_acquire(ospi->rstc);
+ if (ret) {
+ dev_err(dev, "Can not acquire reset\n");
+ return ret;
+ }
+
writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
pm_runtime_mark_last_busy(ospi->dev);
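The stm32-ospi hunks move to the "released" flavour of the reset API: the line is obtained with devm_reset_control_array_get_exclusive_released(), then explicitly acquired with reset_control_acquire() before use and handed back with reset_control_release() on suspend/remove, so another consumer sharing the line can acquire it in the meantime. A minimal sketch of that lifecycle with hypothetical names (single-line variant of the API, not the driver's code):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Hypothetical: pulse a released-exclusive reset line around device init. */
static int example_pulse_reset(struct device *dev)
{
	struct reset_control *rstc;
	int ret;

	rstc = devm_reset_control_get_exclusive_released(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_acquire(rstc);	/* take ownership of the line */
	if (ret)
		return ret;

	reset_control_assert(rstc);
	udelay(2);
	reset_control_deassert(rstc);

	reset_control_release(rstc);		/* let other consumers acquire it */
	return 0;
}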
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 3581757a269b..3be7499db21e 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -407,9 +407,6 @@ tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_tra
static void
tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
- dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys,
- tqspi->dma_buf_size, DMA_TO_DEVICE);
-
/*
* In packed mode, each word in FIFO may contain multiple packets
* based on bits per word. So all bytes in each FIFO word are valid.
@@ -442,17 +439,11 @@ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_
tqspi->cur_tx_pos += write_bytes;
}
-
- dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys,
- tqspi->dma_buf_size, DMA_TO_DEVICE);
}
static void
tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
- dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys,
- tqspi->dma_buf_size, DMA_FROM_DEVICE);
-
if (tqspi->is_packed) {
tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word;
} else {
@@ -478,9 +469,6 @@ tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_
tqspi->cur_rx_pos += read_bytes;
}
-
- dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
- tqspi->dma_buf_size, DMA_FROM_DEVICE);
}
static void tegra_qspi_dma_complete(void *args)
@@ -701,8 +689,6 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct
return ret;
}
- dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
- tqspi->dma_buf_size, DMA_FROM_DEVICE);
ret = tegra_qspi_start_rx_dma(tqspi, t, len);
if (ret < 0) {
dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 1e9eff01b1aa..e9f382c280d9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -868,29 +868,21 @@ static signed int aes_cipher(u8 *key, uint hdrlen,
num_blocks, payload_index;
u8 pn_vector[6];
- u8 mic_iv[16];
- u8 mic_header1[16];
- u8 mic_header2[16];
- u8 ctr_preload[16];
+ u8 mic_iv[16] = {};
+ u8 mic_header1[16] = {};
+ u8 mic_header2[16] = {};
+ u8 ctr_preload[16] = {};
/* Intermediate Buffers */
- u8 chain_buffer[16];
- u8 aes_out[16];
- u8 padded_buffer[16];
+ u8 chain_buffer[16] = {};
+ u8 aes_out[16] = {};
+ u8 padded_buffer[16] = {};
u8 mic[8];
uint frtype = GetFrameType(pframe);
uint frsubtype = GetFrameSubType(pframe);
frsubtype = frsubtype>>4;
- memset((void *)mic_iv, 0, 16);
- memset((void *)mic_header1, 0, 16);
- memset((void *)mic_header2, 0, 16);
- memset((void *)ctr_preload, 0, 16);
- memset((void *)chain_buffer, 0, 16);
- memset((void *)aes_out, 0, 16);
- memset((void *)padded_buffer, 0, 16);
-
if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
a4_exists = 0;
else
@@ -1080,15 +1072,15 @@ static signed int aes_decipher(u8 *key, uint hdrlen,
num_blocks, payload_index;
signed int res = _SUCCESS;
u8 pn_vector[6];
- u8 mic_iv[16];
- u8 mic_header1[16];
- u8 mic_header2[16];
- u8 ctr_preload[16];
+ u8 mic_iv[16] = {};
+ u8 mic_header1[16] = {};
+ u8 mic_header2[16] = {};
+ u8 ctr_preload[16] = {};
/* Intermediate Buffers */
- u8 chain_buffer[16];
- u8 aes_out[16];
- u8 padded_buffer[16];
+ u8 chain_buffer[16] = {};
+ u8 aes_out[16] = {};
+ u8 padded_buffer[16] = {};
u8 mic[8];
uint frtype = GetFrameType(pframe);
@@ -1096,14 +1088,6 @@ static signed int aes_decipher(u8 *key, uint hdrlen,
frsubtype = frsubtype>>4;
- memset((void *)mic_iv, 0, 16);
- memset((void *)mic_header1, 0, 16);
- memset((void *)mic_header2, 0, 16);
- memset((void *)ctr_preload, 0, 16);
- memset((void *)chain_buffer, 0, 16);
- memset((void *)aes_out, 0, 16);
- memset((void *)padded_buffer, 0, 16);
-
/* start to decrypt the payload */
num_blocks = (plen-8) / 16; /* plen including LLC, payload_length and mic) */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 34cf2c399b39..70905805cb17 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1842,7 +1842,9 @@ out:
}
kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
- core_scsi3_lunacl_undepend_item(dest_se_deve);
+
+ if (dest_se_deve)
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
if (is_local)
continue;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index bd02ee898f5d..500dfc009d03 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -235,6 +235,7 @@ struct imx_port {
enum imx_tx_state tx_state;
struct hrtimer trigger_start_tx;
struct hrtimer trigger_stop_tx;
+ unsigned int rxtl;
};
struct imx_port_ucrs {
@@ -1339,6 +1340,7 @@ static void imx_uart_clear_rx_errors(struct imx_port *sport)
#define TXTL_DEFAULT 8
#define RXTL_DEFAULT 8 /* 8 characters or aging timer */
+#define RXTL_CONSOLE_DEFAULT 1
#define TXTL_DMA 8 /* DMA burst setting */
#define RXTL_DMA 9 /* DMA burst setting */
@@ -1457,7 +1459,7 @@ static void imx_uart_disable_dma(struct imx_port *sport)
ucr1 &= ~(UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN);
imx_uart_writel(sport, ucr1, UCR1);
- imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl);
sport->dma_is_enabled = 0;
}
@@ -1482,7 +1484,12 @@ static int imx_uart_startup(struct uart_port *port)
return retval;
}
- imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ if (uart_console(&sport->port))
+ sport->rxtl = RXTL_CONSOLE_DEFAULT;
+ else
+ sport->rxtl = RXTL_DEFAULT;
+
+ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl);
/* disable the DREN bit (Data Ready interrupt enable) before
* requesting IRQs
@@ -1948,7 +1955,7 @@ static int imx_uart_poll_init(struct uart_port *port)
if (retval)
clk_disable_unprepare(sport->clk_ipg);
- imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl);
uart_port_lock_irqsave(&sport->port, &flags);
@@ -2040,7 +2047,7 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
/* If the receiver trigger is 0, set it to a default value */
ufcr = imx_uart_readl(sport, UFCR);
if ((ufcr & UFCR_RXTL_MASK) == 0)
- imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl);
imx_uart_start_rx(port);
}
@@ -2302,7 +2309,7 @@ imx_uart_console_setup(struct console *co, char *options)
else
imx_uart_console_get_options(sport, &baud, &parity, &bits);
- imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);
+ imx_uart_setup_ufcr(sport, TXTL_DEFAULT, sport->rxtl);
retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
index 5d1677f1b651..cb3b127b06b6 100644
--- a/drivers/tty/serial/serial_base_bus.c
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -72,6 +72,7 @@ static int serial_base_device_init(struct uart_port *port,
dev->parent = parent_dev;
dev->bus = &serial_base_bus_type;
dev->release = release;
+ device_set_of_node_from_dev(dev, parent_dev);
if (!serial_base_initialized) {
dev_dbg(port->dev, "uart_add_one_port() called before arch_initcall()?\n");
diff --git a/drivers/tty/vt/ucs.c b/drivers/tty/vt/ucs.c
index 6ead622b7713..03877485dfb7 100644
--- a/drivers/tty/vt/ucs.c
+++ b/drivers/tty/vt/ucs.c
@@ -206,7 +206,7 @@ static int ucs_page_entry_cmp(const void *key, const void *element)
/**
* ucs_get_fallback() - Get a substitution for the provided Unicode character
- * @base: Base Unicode code point (UCS-4)
+ * @cp: Unicode code point (UCS-4)
*
* Get a simpler fallback character for the provided Unicode character.
* This is used for terminal display when corresponding glyph is unavailable.
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index ed39d9cb4432..62049ceb34de 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -4650,6 +4650,7 @@ void do_unblank_screen(int leaving_gfx)
set_palette(vc);
set_cursor(vc);
vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num);
+ notify_update(vc);
}
EXPORT_SYMBOL(do_unblank_screen);
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 4410e7d93b7d..50adfb8b335b 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -6623,9 +6623,14 @@ static void ufshcd_err_handler(struct work_struct *work)
up(&hba->host_sem);
return;
}
- ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ufshcd_err_handling_prepare(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7802,7 +7807,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
- ufshcd_scale_clks(hba, ULONG_MAX, true);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_scale_clks(hba, ULONG_MAX, true);
err = ufshcd_hba_enable(hba);