-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/ABI/testing/sysfs-class-usb_power_delivery | 28
-rw-r--r--  Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/serial/renesas,scif.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-cadence.yaml | 11
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-rockchip.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/usb/apple,dwc3.yaml | 80
-rw-r--r--  Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml | 22
-rw-r--r--  Documentation/devicetree/bindings/usb/eswin,eic7700-usb.yaml | 94
-rw-r--r--  Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml | 10
-rw-r--r--  Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml | 33
-rw-r--r--  Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/generic-ehci.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/generic-xhci.yaml | 15
-rw-r--r--  Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/parade,ps8830.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml | 35
-rw-r--r--  Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/renesas,rzg3e-xhci.yaml | 12
-rw-r--r--  Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml | 8
-rw-r--r--  Documentation/devicetree/bindings/usb/ti,tusb1046.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-switch-ports.yaml | 68
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-switch.yaml | 52
-rw-r--r--  Documentation/devicetree/bindings/usb/usb-uhci.yaml | 13
-rw-r--r--  Documentation/networking/ax25.rst | 7
-rw-r--r--  Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst | 12
-rw-r--r--  Documentation/networking/net_failover.rst | 6
-rw-r--r--  MAINTAINERS | 21
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi | 9
-rw-r--r--  arch/arm64/boot/dts/broadcom/bcm2712.dtsi | 2
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 3
-rw-r--r--  arch/arm64/mm/copypage.c | 11
-rw-r--r--  arch/csky/abiv2/cacheflush.c | 2
-rw-r--r--  arch/csky/abiv2/inc/abi/cacheflush.h | 4
-rw-r--r--  arch/mips/mti-malta/malta-setup.c | 4
-rw-r--r--  arch/mips/pci/pci-malta.c | 3
-rw-r--r--  arch/riscv/include/asm/asm.h | 8
-rw-r--r--  arch/riscv/include/asm/cpufeature.h | 2
-rw-r--r--  arch/riscv/include/asm/hwprobe.h | 7
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h | 2
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 2
-rw-r--r--  arch/riscv/include/asm/vdso/arch_data.h | 6
-rw-r--r--  arch/riscv/kernel/cpu.c | 4
-rw-r--r--  arch/riscv/kernel/cpufeature.c | 4
-rw-r--r--  arch/riscv/kernel/smp.c | 24
-rw-r--r--  arch/riscv/kernel/sys_hwprobe.c | 76
-rw-r--r--  arch/riscv/kernel/unaligned_access_speed.c | 9
-rw-r--r--  arch/riscv/kernel/vdso/hwprobe.c | 2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 11
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/resctrl/monitor.c | 11
-rw-r--r--  block/blk-settings.c | 10
-rw-r--r--  drivers/acpi/acpica/tbprint.c | 6
-rw-r--r--  drivers/acpi/property.c | 2
-rw-r--r--  drivers/acpi/riscv/rimt.c | 122
-rw-r--r--  drivers/android/binder.c | 38
-rw-r--r--  drivers/android/binder/freeze.rs | 18
-rw-r--r--  drivers/android/binder/node.rs | 2
-rw-r--r--  drivers/android/binder/process.rs | 50
-rw-r--r--  drivers/android/binder/transaction.rs | 6
-rw-r--r--  drivers/base/arch_topology.c | 2
-rw-r--r--  drivers/base/core.c | 2
-rw-r--r--  drivers/base/devcoredump.c | 136
-rw-r--r--  drivers/block/nbd.c | 15
-rw-r--r--  drivers/comedi/comedi_buf.c | 2
-rw-r--r--  drivers/cpufreq/amd-pstate.c | 6
-rw-r--r--  drivers/cpuidle/governors/menu.c | 21
-rw-r--r--  drivers/firewire/core-transaction.c | 2
-rw-r--r--  drivers/firewire/init_ohci1394_dma.c | 10
-rw-r--r--  drivers/firmware/arm_ffa/driver.c | 37
-rw-r--r--  drivers/firmware/arm_scmi/common.h | 32
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 59
-rw-r--r--  drivers/gpio/gpio-104-idio-16.c | 1
-rw-r--r--  drivers/gpio/gpio-idio-16.c | 5
-rw-r--r--  drivers/gpio/gpio-ljca.c | 14
-rw-r--r--  drivers/gpio/gpio-pci-idio-16.c | 1
-rw-r--r--  drivers/gpio/gpio-regmap.c | 26
-rw-r--r--  drivers/gpio/gpiolib-acpi-core.c | 31
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 3
-rw-r--r--  drivers/gpu/drm/drm_panic.c | 60
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 25
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.c | 10
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_ggtt.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 96
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_types.h | 10
-rw-r--r--  drivers/hwmon/cgbc-hwmon.c | 3
-rw-r--r--  drivers/hwmon/gpd-fan.c | 10
-rw-r--r--  drivers/hwmon/pmbus/isl68137.c | 3
-rw-r--r--  drivers/hwmon/pmbus/max34440.c | 12
-rw-r--r--  drivers/hwmon/sht3x.c | 27
-rw-r--r--  drivers/misc/amd-sbi/Kconfig | 2
-rw-r--r--  drivers/misc/fastrpc.c | 2
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 2
-rw-r--r--  drivers/misc/mei/mei_lb.c | 3
-rw-r--r--  drivers/misc/mei/pci-me.c | 2
-rw-r--r--  drivers/misc/mei/pci-txe.c | 14
-rw-r--r--  drivers/misc/vmw_balloon.c | 8
-rw-r--r--  drivers/most/most_usb.c | 13
-rw-r--r--  drivers/net/bonding/bond_main.c | 47
-rw-r--r--  drivers/net/can/bxcan.c | 2
-rw-r--r--  drivers/net/can/dev/netlink.c | 6
-rw-r--r--  drivers/net/can/esd/esdacc.c | 2
-rw-r--r--  drivers/net/can/rockchip/rockchip_canfd-tx.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 25
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 9
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 63
-rw-r--r--  drivers/net/ovpn/tcp.c | 26
-rw-r--r--  drivers/net/phy/micrel.c | 4
-rw-r--r--  drivers/net/phy/realtek/realtek_main.c | 16
-rw-r--r--  drivers/net/usb/rtl8150.c | 11
-rw-r--r--  drivers/nvmem/rcar-efuse.c | 1
-rw-r--r--  drivers/of/irq.c | 44
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 28
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 68
-rw-r--r--  drivers/pci/pcie/aspm.c | 34
-rw-r--r--  drivers/platform/mellanox/mlxbf-pmc.c | 1
-rw-r--r--  drivers/platform/x86/dell/alienware-wmi-wmax.c | 12
-rw-r--r--  drivers/ptp/ptp_ocp.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 8
-rw-r--r--  drivers/scsi/storvsc_drv.c | 96
-rw-r--r--  drivers/spi/spi-airoha-snfi.c | 128
-rw-r--r--  drivers/spi/spi-amlogic-spifc-a4.c | 4
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c | 5
-rw-r--r--  drivers/spi/spi-dw-mmio.c | 4
-rw-r--r--  drivers/spi/spi-intel-pci.c | 2
-rw-r--r--  drivers/spi/spi-intel.c | 6
-rw-r--r--  drivers/spi/spi-nxp-fspi.c | 32
-rw-r--r--  drivers/spi/spi-rockchip-sfc.c | 12
-rw-r--r--  drivers/staging/gpib/agilent_82350b/agilent_82350b.c | 12
-rw-r--r--  drivers/staging/gpib/fmh_gpib/fmh_gpib.c | 5
-rw-r--r--  drivers/staging/gpib/ni_usb/ni_usb_gpib.c | 13
-rw-r--r--  drivers/tee/qcomtee/Kconfig | 1
-rw-r--r--  drivers/tee/qcomtee/call.c | 2
-rw-r--r--  drivers/tee/qcomtee/core.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 4
-rw-r--r--  drivers/tty/serial/8250/8250_exar.c | 11
-rw-r--r--  drivers/tty/serial/8250/8250_mtk.c | 6
-rw-r--r--  drivers/tty/serial/sc16is7xx.c | 7
-rw-r--r--  drivers/tty/serial/sh-sci.c | 14
-rw-r--r--  drivers/usb/cdns3/cdns3-gadget.c | 1
-rw-r--r--  drivers/usb/cdns3/cdnsp-gadget.c | 1
-rw-r--r--  drivers/usb/chipidea/core.c | 1
-rw-r--r--  drivers/usb/chipidea/otg_fsm.c | 1
-rw-r--r--  drivers/usb/chipidea/usbmisc_imx.c | 12
-rw-r--r--  drivers/usb/class/cdc-acm.c | 2
-rw-r--r--  drivers/usb/class/usbtmc.c | 12
-rw-r--r--  drivers/usb/core/Makefile | 5
-rw-r--r--  drivers/usb/core/hcd.c | 8
-rw-r--r--  drivers/usb/core/hub.c | 43
-rw-r--r--  drivers/usb/core/message.c | 2
-rw-r--r--  drivers/usb/core/quirks.c | 2
-rw-r--r--  drivers/usb/core/trace.c | 6
-rw-r--r--  drivers/usb/core/trace.h | 61
-rw-r--r--  drivers/usb/core/usb.c | 2
-rw-r--r--  drivers/usb/dwc2/platform.c | 17
-rw-r--r--  drivers/usb/dwc3/Kconfig | 11
-rw-r--r--  drivers/usb/dwc3/Makefile | 1
-rw-r--r--  drivers/usb/dwc3/core.c | 30
-rw-r--r--  drivers/usb/dwc3/drd.c | 1
-rw-r--r--  drivers/usb/dwc3/dwc3-am62.c | 1
-rw-r--r--  drivers/usb/dwc3/dwc3-apple.c | 489
-rw-r--r--  drivers/usb/dwc3/dwc3-generic-plat.c | 73
-rw-r--r--  drivers/usb/dwc3/dwc3-imx8mp.c | 1
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 1
-rw-r--r--  drivers/usb/dwc3/dwc3-qcom.c | 1
-rw-r--r--  drivers/usb/dwc3/dwc3-xilinx.c | 1
-rw-r--r--  drivers/usb/dwc3/gadget.c | 4
-rw-r--r--  drivers/usb/dwc3/glue.h | 157
-rw-r--r--  drivers/usb/dwc3/host.c | 7
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_hid.c | 3
-rw-r--r--  drivers/usb/gadget/legacy/raw_gadget.c | 3
-rw-r--r--  drivers/usb/gadget/legacy/zero.c | 27
-rw-r--r--  drivers/usb/gadget/udc/cdns2/cdns2-gadget.c | 1
-rw-r--r--  drivers/usb/host/ehci-platform.c | 40
-rw-r--r--  drivers/usb/host/ohci-da8xx.c | 17
-rw-r--r--  drivers/usb/host/ohci-platform.c | 24
-rw-r--r--  drivers/usb/host/uhci-hcd.h | 1
-rw-r--r--  drivers/usb/host/uhci-platform.c | 28
-rw-r--r--  drivers/usb/host/xen-hcd.c | 4
-rw-r--r--  drivers/usb/host/xhci-caps.h | 167
-rw-r--r--  drivers/usb/host/xhci-dbgcap.c | 23
-rw-r--r--  drivers/usb/host/xhci-debugfs.c | 57
-rw-r--r--  drivers/usb/host/xhci-hub.c | 125
-rw-r--r--  drivers/usb/host/xhci-mem.c | 41
-rw-r--r--  drivers/usb/host/xhci-mtk.c | 1
-rw-r--r--  drivers/usb/host/xhci-mtk.h | 10
-rw-r--r--  drivers/usb/host/xhci-pci.c | 9
-rw-r--r--  drivers/usb/host/xhci-port.h | 5
-rw-r--r--  drivers/usb/host/xhci-ring.c | 242
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 13
-rw-r--r--  drivers/usb/host/xhci-trace.h | 25
-rw-r--r--  drivers/usb/host/xhci.c | 92
-rw-r--r--  drivers/usb/host/xhci.h | 116
-rw-r--r--  drivers/usb/misc/Kconfig | 1
-rw-r--r--  drivers/usb/misc/apple-mfi-fastcharge.c | 1
-rw-r--r--  drivers/usb/misc/chaoskey.c | 16
-rw-r--r--  drivers/usb/misc/usb-ljca.c | 39
-rw-r--r--  drivers/usb/mtu3/mtu3.h | 34
-rw-r--r--  drivers/usb/mtu3/mtu3_core.c | 2
-rw-r--r--  drivers/usb/mtu3/mtu3_plat.c | 1
-rw-r--r--  drivers/usb/mtu3/mtu3_qmu.c | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 5
-rw-r--r--  drivers/usb/musb/musb_debugfs.c | 5
-rw-r--r--  drivers/usb/musb/musb_dsps.c | 1
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 4
-rw-r--r--  drivers/usb/musb/omap2430.c | 1
-rw-r--r--  drivers/usb/renesas_usbhs/common.c | 35
-rw-r--r--  drivers/usb/serial/option.c | 10
-rw-r--r--  drivers/usb/storage/protocol.c | 3
-rw-r--r--  drivers/usb/storage/uas.c | 25
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 2
-rw-r--r--  drivers/usb/typec/altmodes/displayport.c | 4
-rw-r--r--  drivers/usb/typec/anx7411.c | 3
-rw-r--r--  drivers/usb/typec/class.c | 13
-rw-r--r--  drivers/usb/typec/hd3ss3220.c | 75
-rw-r--r--  drivers/usb/typec/mux/ps883x.c | 135
-rw-r--r--  drivers/usb/typec/pd.c | 95
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 19
-rw-r--r--  drivers/usb/typec/tipd/core.c | 15
-rw-r--r--  drivers/usb/typec/ucsi/cros_ec_ucsi.c | 5
-rw-r--r--  drivers/usb/typec/ucsi/debugfs.c | 37
-rw-r--r--  drivers/usb/typec/ucsi/displayport.c | 11
-rw-r--r--  drivers/usb/typec/ucsi/psy.c | 26
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.c | 156
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.h | 30
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_acpi.c | 25
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_ccg.c | 11
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_glink.c | 88
-rw-r--r--  drivers/usb/typec/ucsi/ucsi_yoga_c630.c | 15
-rw-r--r--  drivers/usb/usbip/stub_tx.c | 9
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 96
-rw-r--r--  fs/9p/vfs_dentry.c | 10
-rw-r--r--  fs/9p/vfs_inode.c | 8
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 8
-rw-r--r--  fs/btrfs/delayed-inode.c | 2
-rw-r--r--  fs/btrfs/delayed-inode.h | 7
-rw-r--r--  fs/btrfs/ref-verify.c | 2
-rw-r--r--  fs/btrfs/send.c | 56
-rw-r--r--  fs/btrfs/super.c | 8
-rw-r--r--  fs/erofs/zmap.c | 59
-rw-r--r--  fs/hugetlbfs/inode.c | 9
-rw-r--r--  fs/notify/fdinfo.c | 6
-rw-r--r--  fs/ocfs2/move_extents.c | 5
-rw-r--r--  fs/resctrl/monitor.c | 16
-rw-r--r--  fs/smb/client/cifsglob.h | 4
-rw-r--r--  fs/smb/client/cifsproto.h | 1
-rw-r--r--  fs/smb/client/cifssmb.c | 8
-rw-r--r--  fs/smb/client/inode.c | 5
-rw-r--r--  fs/smb/client/smb2ops.c | 4
-rw-r--r--  fs/smb/client/smb2proto.h | 6
-rw-r--r--  fs/smb/client/smb2transport.c | 18
-rw-r--r--  fs/smb/client/smbdirect.c | 103
-rw-r--r--  fs/smb/client/trace.c | 1
-rw-r--r--  fs/smb/common/smbdirect/smbdirect_socket.h | 13
-rw-r--r--  fs/smb/server/transport_rdma.c | 344
-rw-r--r--  fs/sysfs/group.c | 26
-rw-r--r--  fs/xfs/Kconfig | 11
-rw-r--r--  fs/xfs/scrub/nlinks.c | 34
-rw-r--r--  fs/xfs/xfs_buf.c | 2
-rw-r--r--  fs/xfs/xfs_buf.h | 1
-rw-r--r--  fs/xfs/xfs_mount.h | 1
-rw-r--r--  fs/xfs/xfs_super.c | 53
-rw-r--r--  fs/xfs/xfs_zone_alloc.c | 148
-rw-r--r--  fs/xfs/xfs_zone_gc.c | 81
-rw-r--r--  fs/xfs/xfs_zone_priv.h | 2
-rw-r--r--  include/linux/arm_ffa.h | 21
-rw-r--r--  include/linux/cgroup-defs.h | 2
-rw-r--r--  include/linux/exportfs.h | 7
-rw-r--r--  include/linux/gpio/regmap.h | 5
-rw-r--r--  include/linux/hung_task.h | 8
-rw-r--r--  include/linux/misc_cgroup.h | 2
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 4
-rw-r--r--  include/linux/platform_data/usb-davinci.h | 22
-rw-r--r--  include/linux/pm_runtime.h | 8
-rw-r--r--  include/linux/skbuff.h | 3
-rw-r--r--  include/linux/usb/pd.h | 69
-rw-r--r--  include/linux/usb/typec.h | 1
-rw-r--r--  include/linux/usb/typec_altmode.h | 13
-rw-r--r--  include/linux/usb/typec_tbt.h | 1
-rw-r--r--  include/linux/virtio_net.h | 4
-rw-r--r--  include/uapi/drm/xe_drm.h | 15
-rw-r--r--  include/uapi/linux/usb/cdc.h | 12
-rw-r--r--  io_uring/fdinfo.c | 8
-rw-r--r--  io_uring/filetable.c | 2
-rw-r--r--  io_uring/io_uring.c | 2
-rw-r--r--  io_uring/kbuf.c | 33
-rw-r--r--  io_uring/net.c | 2
-rw-r--r--  io_uring/sqpoll.c | 65
-rw-r--r--  io_uring/sqpoll.h | 1
-rw-r--r--  io_uring/waitid.c | 2
-rw-r--r--  kernel/cgroup/cgroup.c | 2
-rw-r--r--  kernel/dma/debug.c | 5
-rw-r--r--  kernel/irq/chip.c | 2
-rw-r--r--  kernel/irq/manage.c | 4
-rw-r--r--  kernel/sched/fair.c | 12
-rw-r--r--  kernel/sched/sched.h | 2
-rw-r--r--  kernel/time/timekeeping.c | 2
-rw-r--r--  kernel/trace/rv/monitors/pagefault/Kconfig | 1
-rw-r--r--  kernel/trace/rv/rv.c | 12
-rw-r--r--  lib/crypto/Kconfig | 2
-rw-r--r--  mm/damon/core.c | 7
-rw-r--r--  mm/damon/sysfs.c | 7
-rw-r--r--  mm/huge_memory.c | 3
-rw-r--r--  mm/hugetlb.c | 5
-rw-r--r--  mm/migrate.c | 3
-rw-r--r--  mm/mremap.c | 15
-rw-r--r--  mm/page_owner.c | 3
-rw-r--r--  mm/slub.c | 31
-rw-r--r--  net/core/datagram.c | 44
-rw-r--r--  net/core/gro.c | 10
-rw-r--r--  net/core/gro_cells.c | 5
-rw-r--r--  net/core/rtnetlink.c | 3
-rw-r--r--  net/hsr/hsr_netlink.c | 8
-rw-r--r--  net/mptcp/pm_kernel.c | 6
-rw-r--r--  net/sctp/inqueue.c | 13
-rw-r--r--  net/smc/smc_inet.c | 13
-rw-r--r--  net/vmw_vsock/af_vsock.c | 38
-rw-r--r--  net/xfrm/espintcp.c | 6
-rw-r--r--  rust/bindings/bindings_helper.h | 1
-rw-r--r--  rust/helpers/helpers.c | 1
-rw-r--r--  rust/kernel/auxiliary.rs | 8
-rw-r--r--  rust/kernel/device.rs | 4
-rw-r--r--  rust/kernel/lib.rs | 2
-rw-r--r--  samples/rust/Kconfig | 2
-rw-r--r--  tools/objtool/check.c | 5
-rw-r--r--  tools/testing/selftests/cgroup/lib/include/cgroup_util.h | 20
-rw-r--r--  tools/testing/selftests/cgroup/test_cpu.c | 18
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh | 18
-rw-r--r--  tools/testing/selftests/net/sctp_hello.c | 17
-rwxr-xr-x  tools/testing/selftests/net/sctp_vrf.sh | 73
366 files changed, 5237 insertions, 2416 deletions
diff --git a/.mailmap b/.mailmap
index c371ba3fb845..717d754b378c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -27,6 +27,7 @@ Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
Aleksey Gorelov <aleksey_gorelov@phoenix.com>
+Alex Williamson <alex@shazbot.org> <alex.williamson@redhat.com>
Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
diff --git a/Documentation/ABI/testing/sysfs-class-usb_power_delivery b/Documentation/ABI/testing/sysfs-class-usb_power_delivery
index 61d233c320ea..c754458a527e 100644
--- a/Documentation/ABI/testing/sysfs-class-usb_power_delivery
+++ b/Documentation/ABI/testing/sysfs-class-usb_power_delivery
@@ -254,3 +254,31 @@ Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
Description:
The PPS Power Limited bit indicates whether or not the source
supply will exceed the rated output power if requested.
+
+Standard Power Range (SPR) Adjustable Voltage Supplies
+
+What: /sys/class/usb_power_delivery/.../<capability>/<position>:spr_adjustable_voltage_supply
+Date: Oct 2025
+Contact: Badhri Jagan Sridharan <badhri@google.com>
+Description:
+ Adjustable Voltage Supply (AVS) Augmented PDO (APDO).
+
+What: /sys/class/usb_power_delivery/.../<capability>/<position>:spr_adjustable_voltage_supply/maximum_current_9V_to_15V
+Date: Oct 2025
+Contact: Badhri Jagan Sridharan <badhri@google.com>
+Description:
+ Maximum Current for 9V to 15V range in milliamperes.
+
+What: /sys/class/usb_power_delivery/.../<capability>/<position>:spr_adjustable_voltage_supply/maximum_current_15V_to_20V
+Date: Oct 2025
+Contact: Badhri Jagan Sridharan <badhri@google.com>
+Description:
+ Maximum Current for greater than 15V till 20V range in
+ milliamperes.
+
+What: /sys/class/usb_power_delivery/.../<capability>/<position>:spr_adjustable_voltage_supply/peak_current
+Date: Oct 2025
+Contact: Badhri Jagan Sridharan <badhri@google.com>
+Description:
+ This file shows the value of the Adjustable Voltage Supply Peak Current
+ Capability field.
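
Not part of the patch above: a minimal userspace sketch of reading the new SPR AVS attributes once this ABI is available. The pd0/source-capabilities/4: path is a made-up example; real paths follow the pattern given in the What: entries.

#include <stdio.h>

/* Read one decimal sysfs attribute; returns -1 on any failure. */
static long read_attr(const char *dir, const char *name)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* Hypothetical capability/position; adjust to the device at hand. */
	const char *apdo =
		"/sys/class/usb_power_delivery/pd0/source-capabilities/4:spr_adjustable_voltage_supply";

	printf("max current 9V-15V:  %ld mA\n",
	       read_attr(apdo, "maximum_current_9V_to_15V"));
	printf("max current 15V-20V: %ld mA\n",
	       read_attr(apdo, "maximum_current_15V_to_20V"));
	printf("peak current code:   %ld\n", read_attr(apdo, "peak_current"));
	return 0;
}
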
diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
index 6a47e08e0e97..f9cffbb2df07 100644
--- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
@@ -142,7 +142,9 @@ allOf:
required:
- orientation-switch
then:
- $ref: /schemas/usb/usb-switch.yaml#
+ allOf:
+ - $ref: /schemas/usb/usb-switch.yaml#
+ - $ref: /schemas/usb/usb-switch-ports.yaml#
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
index a58370a6a5d3..fba7b2549dde 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
@@ -24,6 +24,10 @@ properties:
- enum:
- qcom,qcs8300-qmp-ufs-phy
- const: qcom,sa8775p-qmp-ufs-phy
+ - items:
+ - enum:
+ - qcom,kaanapali-qmp-ufs-phy
+ - const: qcom,sm8750-qmp-ufs-phy
- enum:
- qcom,msm8996-qmp-ufs-phy
- qcom,msm8998-qmp-ufs-phy
diff --git a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
index e906403208c0..ea1135c91fb7 100644
--- a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
@@ -125,7 +125,9 @@ allOf:
contains:
const: google,gs101-usb31drd-phy
then:
- $ref: /schemas/usb/usb-switch.yaml#
+ allOf:
+ - $ref: /schemas/usb/usb-switch.yaml#
+ - $ref: /schemas/usb/usb-switch-ports.yaml#
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index e925cd4c3ac8..72483bc3274d 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -197,6 +197,7 @@ allOf:
- renesas,rcar-gen2-scif
- renesas,rcar-gen3-scif
- renesas,rcar-gen4-scif
+ - renesas,rcar-gen5-scif
then:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.yaml b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
index 8de96abe9da1..27414b78d61d 100644
--- a/Documentation/devicetree/bindings/spi/spi-cadence.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
@@ -14,9 +14,14 @@ allOf:
properties:
compatible:
- enum:
- - cdns,spi-r1p6
- - xlnx,zynq-spi-r1p6
+ oneOf:
+ - enum:
+ - xlnx,zynq-spi-r1p6
+ - items:
+ - enum:
+ - xlnx,zynqmp-spi-r1p6
+ - xlnx,versal-net-spi-r1p6
+ - const: cdns,spi-r1p6
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index 748faf7f7081..ce6762c92fda 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -34,6 +34,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
+ - rockchip,rk3506-spi
- rockchip,rk3528-spi
- rockchip,rk3562-spi
- rockchip,rk3568-spi
diff --git a/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml b/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
index aaa0bbb5bfe1..cea84ab2204f 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
@@ -15,6 +15,7 @@ select:
compatible:
contains:
enum:
+ - qcom,kaanapali-ufshc
- qcom,sm8650-ufshc
- qcom,sm8750-ufshc
required:
@@ -24,6 +25,7 @@ properties:
compatible:
items:
- enum:
+ - qcom,kaanapali-ufshc
- qcom,sm8650-ufshc
- qcom,sm8750-ufshc
- const: qcom,ufshc
diff --git a/Documentation/devicetree/bindings/usb/apple,dwc3.yaml b/Documentation/devicetree/bindings/usb/apple,dwc3.yaml
new file mode 100644
index 000000000000..f70c33f32c5d
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/apple,dwc3.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/apple,dwc3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple Silicon DWC3 USB controller
+
+maintainers:
+ - Sven Peter <sven@kernel.org>
+
+description:
+ Apple Silicon SoCs use a Synopsys DesignWare DWC3 based controller for each of
+ their Type-C ports.
+
+allOf:
+ - $ref: snps,dwc3-common.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - apple,t6000-dwc3
+ - apple,t6020-dwc3
+ - apple,t8112-dwc3
+ - const: apple,t8103-dwc3
+ - const: apple,t8103-dwc3
+
+ reg:
+ items:
+ - description: Core DWC3 region
+ - description: Apple-specific DWC3 region
+
+ reg-names:
+ items:
+ - const: dwc3-core
+ - const: dwc3-apple
+
+ interrupts:
+ maxItems: 1
+
+ iommus:
+ maxItems: 2
+
+ resets:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - iommus
+ - resets
+ - power-domains
+ - usb-role-switch
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/apple-aic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ usb@82280000 {
+ compatible = "apple,t8103-dwc3";
+ reg = <0x82280000 0xcd00>, <0x8228cd00 0x3200>;
+ reg-names = "dwc3-core", "dwc3-apple";
+ interrupts = <AIC_IRQ 777 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&dwc3_0_dart_0 0>, <&dwc3_0_dart_1 1>;
+
+ power-domains = <&ps_atc0_usb>;
+ resets = <&atcphy0>;
+
+ usb-role-switch;
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
index 36f5c644d959..d6823ef5f9a7 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
@@ -47,6 +47,7 @@ properties:
- const: ref_clk
resets:
+ minItems: 1
description:
A list of phandles for resets listed in reset-names.
@@ -56,6 +57,7 @@ properties:
- description: USB APB reset
reset-names:
+ minItems: 1
items:
- const: usb_crst
- const: usb_hibrst
@@ -95,6 +97,26 @@ required:
- resets
- reset-names
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - xlnx,versal-dwc3
+ then:
+ properties:
+ resets:
+ maxItems: 1
+ reset-names:
+ maxItems: 1
+ else:
+ properties:
+ resets:
+ minItems: 3
+ reset-names:
+ minItems: 3
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/usb/eswin,eic7700-usb.yaml b/Documentation/devicetree/bindings/usb/eswin,eic7700-usb.yaml
new file mode 100644
index 000000000000..41c3b1b98991
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/eswin,eic7700-usb.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/eswin,eic7700-usb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ESWIN EIC7700 SoC Usb Controller
+
+maintainers:
+ - Wei Yang <yangwei1@eswincomputing.com>
+ - Senchuan Zhang <zhangsenchuan@eswincomputing.com>
+ - Hang Cao <caohang@eswincomputing.com>
+
+description:
+ The Usb controller on EIC7700 SoC.
+
+allOf:
+ - $ref: snps,dwc3-common.yaml#
+
+properties:
+ compatible:
+ const: eswin,eic7700-dwc3
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: peripheral
+
+ clocks:
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: aclk
+ - const: cfg
+ - const: usb_en
+
+ resets:
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: vaux
+ - const: usb_rst
+
+ eswin,hsp-sp-csr:
+ description:
+ HSP CSR is to control and get status of different high-speed peripherals
+ (such as Ethernet, USB, SATA, etc.) via register, which can tune
+ board-level's parameters of PHY, etc.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: phandle to HSP Register Controller hsp_sp_csr node.
+ - description: USB bus register offset.
+ - description: AXI low power register offset.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - resets
+ - reset-names
+ - eswin,hsp-sp-csr
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ usb@50480000 {
+ compatible = "eswin,eic7700-dwc3";
+ reg = <0x50480000 0x10000>;
+ clocks = <&clock 135>,
+ <&clock 136>,
+ <&hspcrg 18>;
+ clock-names = "aclk", "cfg", "usb_en";
+ interrupt-parent = <&plic>;
+ interrupts = <85>;
+ interrupt-names = "peripheral";
+ resets = <&reset 84>, <&hspcrg 2>;
+ reset-names = "vaux", "usb_rst";
+ dr_mode = "peripheral";
+ maximum-speed = "high-speed";
+ phy_type = "utmi";
+ eswin,hsp-sp-csr = <&hsp_sp_csr 0x800 0x818>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
index e3a7df91f7f1..89b1fb90aeeb 100644
--- a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
+++ b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
@@ -76,6 +76,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
index baf130669c38..73e7a60a0060 100644
--- a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
@@ -89,13 +89,21 @@ required:
- reg
- "#address-cells"
- "#size-cells"
- - dma-ranges
- ranges
- clocks
- clock-names
- interrupts
- power-domains
+allOf:
+ - if:
+ properties:
+ compatible:
+ const: fsl,imx8mp-dwc3
+ then:
+ required:
+ - dma-ranges
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml b/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml
index a44bdf391887..4784f057264a 100644
--- a/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml
+++ b/Documentation/devicetree/bindings/usb/fsl,ls1028a.yaml
@@ -9,21 +9,19 @@ title: Freescale layerscape SuperSpeed DWC3 USB SoC controller
maintainers:
- Frank Li <Frank.Li@nxp.com>
-select:
- properties:
- compatible:
- contains:
- enum:
- - fsl,ls1028a-dwc3
- required:
- - compatible
-
properties:
compatible:
- items:
- - enum:
- - fsl,ls1028a-dwc3
- - const: snps,dwc3
+ oneOf:
+ - items:
+ - enum:
+ - fsl,ls1012a-dwc3
+ - fsl,ls1043a-dwc3
+ - fsl,ls1046a-dwc3
+ - fsl,ls1088a-dwc3
+ - fsl,ls208xa-dwc3
+ - fsl,lx2160a-dwc3
+ - const: fsl,ls1028a-dwc3
+ - const: fsl,ls1028a-dwc3
reg:
maxItems: 1
@@ -31,6 +29,11 @@ properties:
interrupts:
maxItems: 1
+ iommus:
+ maxItems: 1
+
+ dma-coherent: true
+
unevaluatedProperties: false
required:
@@ -39,14 +42,14 @@ required:
- interrupts
allOf:
- - $ref: snps,dwc3.yaml#
+ - $ref: snps,dwc3-common.yaml#
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
usb@fe800000 {
- compatible = "fsl,ls1028a-dwc3", "snps,dwc3";
+ compatible = "fsl,ls1028a-dwc3";
reg = <0xfe800000 0x100000>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml b/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml
index ca677d1a8274..d06efe4dbb3b 100644
--- a/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml
+++ b/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml
@@ -36,6 +36,7 @@ properties:
- fsl,imx8mm-usbmisc
- fsl,imx8mn-usbmisc
- fsl,imx8ulp-usbmisc
+ - fsl,imx94-usbmisc
- fsl,imx95-usbmisc
- const: fsl,imx7d-usbmisc
- const: fsl,imx6q-usbmisc
diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
index 508d958e698c..4e84bead0232 100644
--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
@@ -46,6 +46,7 @@ properties:
- aspeed,ast2400-ehci
- aspeed,ast2500-ehci
- aspeed,ast2600-ehci
+ - aspeed,ast2700-ehci
- brcm,bcm3384-ehci
- brcm,bcm63268-ehci
- brcm,bcm6328-ehci
diff --git a/Documentation/devicetree/bindings/usb/generic-xhci.yaml b/Documentation/devicetree/bindings/usb/generic-xhci.yaml
index a2b94a138999..62678abd74b5 100644
--- a/Documentation/devicetree/bindings/usb/generic-xhci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-xhci.yaml
@@ -14,12 +14,15 @@ properties:
oneOf:
- description: Generic xHCI device
const: generic-xhci
- - description: Armada 37xx/375/38x/8k SoCs
+ - description: Armada 375/38x SoCs
items:
- enum:
- - marvell,armada3700-xhci
- marvell,armada-375-xhci
- marvell,armada-380-xhci
+ - description: Armada 37xx/8k SoCs
+ items:
+ - enum:
+ - marvell,armada3700-xhci
- marvell,armada-8k-xhci
- const: generic-xhci
- description: Broadcom SoCs with power domains
@@ -53,6 +56,14 @@ properties:
dma-coherent: true
+ dr_mode:
+ enum:
+ - host
+ - otg
+
+ iommus:
+ maxItems: 1
+
power-domains:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml b/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
index e588514fab2d..793662f6f3bf 100644
--- a/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
+++ b/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
@@ -52,6 +52,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
- if:
required:
- mode-switch
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
index 004d3ebec091..231e6f35a986 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
@@ -34,6 +34,7 @@ properties:
- mediatek,mt8183-xhci
- mediatek,mt8186-xhci
- mediatek,mt8188-xhci
+ - mediatek,mt8189-xhci
- mediatek,mt8192-xhci
- mediatek,mt8195-xhci
- mediatek,mt8365-xhci
@@ -168,7 +169,8 @@ properties:
104 - used by mt8195, IP1, specific 1.04;
105 - used by mt8195, IP2, specific 1.05;
106 - used by mt8195, IP3, specific 1.06;
- enum: [1, 2, 101, 102, 103, 104, 105, 106]
+ 110 - used by mt8189, IP4, specific 1.10;
+ enum: [1, 2, 101, 102, 103, 104, 105, 106, 110]
mediatek,u3p-dis-msk:
$ref: /schemas/types.yaml#/definitions/uint32
diff --git a/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml b/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
index d805dde80796..4d2fcaa71870 100644
--- a/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
+++ b/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
@@ -46,6 +46,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml b/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
index 589914d22bf2..25fab5fdc2cd 100644
--- a/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
+++ b/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
@@ -91,6 +91,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/parade,ps8830.yaml b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
index aeb33667818e..eaeab1c01a59 100644
--- a/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
+++ b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
@@ -81,6 +81,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
index dfd084ed9024..8cee7c5582f2 100644
--- a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
@@ -24,6 +24,8 @@ properties:
compatible:
items:
- enum:
+ - qcom,glymur-dwc3
+ - qcom,glymur-dwc3-mp
- qcom,ipq4019-dwc3
- qcom,ipq5018-dwc3
- qcom,ipq5332-dwc3
@@ -32,6 +34,7 @@ properties:
- qcom,ipq8064-dwc3
- qcom,ipq8074-dwc3
- qcom,ipq9574-dwc3
+ - qcom,kaanapali-dwc3
- qcom,milos-dwc3
- qcom,msm8953-dwc3
- qcom,msm8994-dwc3
@@ -67,7 +70,9 @@ properties:
- qcom,sm8450-dwc3
- qcom,sm8550-dwc3
- qcom,sm8650-dwc3
+ - qcom,sm8750-dwc3
- qcom,x1e80100-dwc3
+ - qcom,x1e80100-dwc3-mp
- const: qcom,snps-dwc3
reg:
@@ -199,6 +204,7 @@ allOf:
contains:
enum:
- qcom,ipq9574-dwc3
+ - qcom,kaanapali-dwc3
- qcom,msm8953-dwc3
- qcom,msm8996-dwc3
- qcom,msm8998-dwc3
@@ -212,6 +218,7 @@ allOf:
- qcom,sdx65-dwc3
- qcom,sdx75-dwc3
- qcom,sm6350-dwc3
+ - qcom,sm8750-dwc3
then:
properties:
clocks:
@@ -391,6 +398,28 @@ allOf:
compatible:
contains:
enum:
+ - qcom,glymur-dwc3
+ - qcom,glymur-dwc3-mp
+
+ then:
+ properties:
+ clocks:
+ maxItems: 7
+ clock-names:
+ items:
+ - const: cfg_noc
+ - const: core
+ - const: iface
+ - const: sleep
+ - const: mock_utmi
+ - const: noc_aggr_north
+ - const: noc_aggr_south
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,ipq5018-dwc3
- qcom,ipq6018-dwc3
- qcom,ipq8074-dwc3
@@ -455,13 +484,16 @@ allOf:
compatible:
contains:
enum:
+ - qcom,glymur-dwc3
- qcom,milos-dwc3
- qcom,x1e80100-dwc3
then:
properties:
interrupts:
+ minItems: 4
maxItems: 5
interrupt-names:
+ minItems: 4
items:
- const: dwc_usb3
- const: pwr_event
@@ -476,6 +508,7 @@ allOf:
enum:
- qcom,ipq4019-dwc3
- qcom,ipq8064-dwc3
+ - qcom,kaanapali-dwc3
- qcom,msm8994-dwc3
- qcom,qcs615-dwc3
- qcom,qcs8300-dwc3
@@ -498,6 +531,7 @@ allOf:
- qcom,sm8450-dwc3
- qcom,sm8550-dwc3
- qcom,sm8650-dwc3
+ - qcom,sm8750-dwc3
then:
properties:
interrupts:
@@ -518,6 +552,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,glymur-dwc3-mp
- qcom,sc8180x-dwc3-mp
- qcom,x1e80100-dwc3-mp
then:
diff --git a/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml b/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
index 96346723f3e9..96dcec9b7620 100644
--- a/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
@@ -60,6 +60,7 @@ required:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/renesas,rzg3e-xhci.yaml b/Documentation/devicetree/bindings/usb/renesas,rzg3e-xhci.yaml
index 98260f9fb442..3f4b09e48ce0 100644
--- a/Documentation/devicetree/bindings/usb/renesas,rzg3e-xhci.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,rzg3e-xhci.yaml
@@ -4,14 +4,22 @@
$id: http://devicetree.org/schemas/usb/renesas,rzg3e-xhci.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Renesas RZ/G3E USB 3.2 Gen2 Host controller
+title: Renesas USB 3.2 Gen2 Host controller
maintainers:
- Biju Das <biju.das.jz@bp.renesas.com>
properties:
compatible:
- const: renesas,r9a09g047-xhci
+ oneOf:
+ - items:
+ - enum:
+ - renesas,r9a09g056-xhci # RZ/V2N
+ - renesas,r9a09g057-xhci # RZ/V2H(P)
+ - const: renesas,r9a09g047-xhci
+
+ - items:
+ - const: renesas,r9a09g047-xhci # RZ/G3E
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
index 6d39e5066944..8af0143c3e47 100644
--- a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
@@ -22,6 +22,9 @@ properties:
- samsung,exynos850-dwusb3
- samsung,exynosautov920-dwusb3
- items:
+ - const: samsung,exynos8890-dwusb3
+ - const: samsung,exynos7-dwusb3
+ - items:
- const: samsung,exynos990-dwusb3
- const: samsung,exynos850-dwusb3
@@ -36,6 +39,9 @@ properties:
minItems: 1
maxItems: 4
+ power-domains:
+ maxItems: 1
+
ranges: true
'#size-cells':
diff --git a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml
index bec1c8047bc0..06099e93c6c3 100644
--- a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml
@@ -25,6 +25,14 @@ properties:
interrupts:
maxItems: 1
+ id-gpios:
+ description:
+ An input gpio for USB ID pin. Upon detecting a UFP device, HD3SS3220
+ will keep ID pin high if VBUS is not at VSafe0V. Once VBUS is at VSafe0V,
+ the HD3SS3220 will assert ID pin low. This is done to enforce Type-C
+ requirement that VBUS must be at VSafe0V before re-enabling VBUS.
+ maxItems: 1
+
ports:
$ref: /schemas/graph.yaml#/properties/ports
description: OF graph bindings (specified in bindings/graph.txt) that model
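
Not part of the patch above: a hedged sketch of how a driver might pick up the optional id-gpios property described in this binding. devm_gpiod_get_optional() and GPIOD_IN are the standard gpiod consumer API; the helper name and surrounding probe context are hypothetical.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Hypothetical helper: fetch the optional "id-gpios" line as an input. */
static int hd3ss3220_get_id_gpio(struct device *dev,
				 struct gpio_desc **id_gpiod)
{
	struct gpio_desc *desc;

	/* "id" con_id maps to the "id-gpios" DT property. */
	desc = devm_gpiod_get_optional(dev, "id", GPIOD_IN);
	if (IS_ERR(desc))
		return PTR_ERR(desc);	/* includes -EPROBE_DEFER */

	*id_gpiod = desc;		/* NULL if the property is absent */
	return 0;
}
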
diff --git a/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml b/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml
index f713cac4a8ac..e1501ea6b50b 100644
--- a/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: usb-switch.yaml#
+ - $ref: usb-switch-ports.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/usb/usb-switch-ports.yaml b/Documentation/devicetree/bindings/usb/usb-switch-ports.yaml
new file mode 100644
index 000000000000..6bf0c97e30ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb-switch-ports.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/usb-switch-ports.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: USB Orientation and Mode Switches Ports Graph Properties
+
+maintainers:
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+description:
+ Ports Graph properties for devices handling USB mode and orientation switching.
+
+properties:
+ port:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ description:
+ A port node to link the device to a TypeC controller for the purpose of
+ handling altmode muxing and orientation switching.
+
+ properties:
+ endpoint:
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
+ properties:
+ data-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ uniqueItems: true
+ items:
+ maximum: 8
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Super Speed (SS) Output endpoint to the Type-C connector
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ description:
+ Super Speed (SS) Input endpoint from the Super-Speed PHY
+ unevaluatedProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
+ properties:
+ data-lanes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ uniqueItems: true
+ items:
+ maximum: 8
+
+oneOf:
+ - required:
+ - port
+ - required:
+ - ports
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/usb/usb-switch.yaml b/Documentation/devicetree/bindings/usb/usb-switch.yaml
index 896201912630..f77731493dc4 100644
--- a/Documentation/devicetree/bindings/usb/usb-switch.yaml
+++ b/Documentation/devicetree/bindings/usb/usb-switch.yaml
@@ -25,56 +25,4 @@ properties:
description: Possible handler of SuperSpeed signals retiming
type: boolean
- port:
- $ref: /schemas/graph.yaml#/$defs/port-base
- description:
- A port node to link the device to a TypeC controller for the purpose of
- handling altmode muxing and orientation switching.
-
- properties:
- endpoint:
- $ref: /schemas/graph.yaml#/$defs/endpoint-base
- unevaluatedProperties: false
- properties:
- data-lanes:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 1
- maxItems: 8
- uniqueItems: true
- items:
- maximum: 8
-
- ports:
- $ref: /schemas/graph.yaml#/properties/ports
- properties:
- port@0:
- $ref: /schemas/graph.yaml#/properties/port
- description:
- Super Speed (SS) Output endpoint to the Type-C connector
-
- port@1:
- $ref: /schemas/graph.yaml#/$defs/port-base
- description:
- Super Speed (SS) Input endpoint from the Super-Speed PHY
- unevaluatedProperties: false
-
- properties:
- endpoint:
- $ref: /schemas/graph.yaml#/$defs/endpoint-base
- unevaluatedProperties: false
- properties:
- data-lanes:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 1
- maxItems: 8
- uniqueItems: true
- items:
- maximum: 8
-
-oneOf:
- - required:
- - port
- - required:
- - ports
-
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/usb/usb-uhci.yaml b/Documentation/devicetree/bindings/usb/usb-uhci.yaml
index d8336f72dc1f..e050ca203945 100644
--- a/Documentation/devicetree/bindings/usb/usb-uhci.yaml
+++ b/Documentation/devicetree/bindings/usb/usb-uhci.yaml
@@ -20,6 +20,7 @@ properties:
- aspeed,ast2400-uhci
- aspeed,ast2500-uhci
- aspeed,ast2600-uhci
+ - aspeed,ast2700-uhci
- const: generic-uhci
reg:
@@ -28,6 +29,9 @@ properties:
interrupts:
maxItems: 1
+ resets:
+ maxItems: 1
+
'#ports':
$ref: /schemas/types.yaml#/definitions/uint32
@@ -50,6 +54,15 @@ allOf:
required:
- clocks
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: aspeed,ast2700-uhci
+ then:
+ required:
+ - resets
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/networking/ax25.rst b/Documentation/networking/ax25.rst
index 605e72c6c877..89c79dd6c6f9 100644
--- a/Documentation/networking/ax25.rst
+++ b/Documentation/networking/ax25.rst
@@ -11,6 +11,7 @@ found on https://linux-ax25.in-berlin.de.
There is a mailing list for discussing Linux amateur radio matters
called linux-hams@vger.kernel.org. To subscribe to it, send a message to
-majordomo@vger.kernel.org with the words "subscribe linux-hams" in the body
-of the message, the subject field is ignored. You don't need to be
-subscribed to post but of course that means you might miss an answer.
+linux-hams+subscribe@vger.kernel.org or use the web interface at
+https://vger.kernel.org. The subject and body of the message are
+ignored. You don't need to be subscribed to post but of course that
+means you might miss an answer.
diff --git a/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst b/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
index 289c146a8291..6877a3260582 100644
--- a/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
+++ b/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
@@ -137,16 +137,20 @@ d. Checksum offload header v5
Checksum offload header fields are in big endian format.
+Packet format::
+
Bit 0 - 6 7 8-15 16-31
Function Header Type Next Header Checksum Valid Reserved
Header Type is to indicate the type of header, this usually is set to CHECKSUM
Header types
-= ==========================================
+
+= ===============
0 Reserved
1 Reserved
2 checksum header
+= ===============
Checksum Valid is to indicate whether the header checksum is valid. Value of 1
implies that checksum is calculated on this packet and is valid, value of 0
@@ -183,9 +187,11 @@ rmnet in a single linear skb. rmnet will process the individual
packets and either ACK the MAP command or deliver the IP packet to the
network stack as needed
-MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
+Packet format::
+
+ MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
-MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
+ MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
3. Userspace configuration
==========================
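
Not part of the patch above: a hypothetical C rendering of the checksum offload header v5 layout documented in this file. Names and accessor macros are illustrative only and may not match the kernel's own rmnet definitions; fields are big endian on the wire, with bit 0 taken as the most significant bit of the first byte.

#include <stdint.h>

struct mapv5_csum_header {
	uint8_t  header_info;	/* bits 0-6: header type, bit 7: next header */
	uint8_t  csum_info;	/* bits 8-15: checksum valid */
	uint16_t reserved;	/* bits 16-31: reserved (big endian) */
};

/* Header type 2 means "checksum header", per the table above. */
#define MAPV5_HDR_TYPE(h)	(((h)->header_info >> 1) & 0x7f)
#define MAPV5_NEXT_HDR(h)	((h)->header_info & 0x01)
#define MAPV5_CSUM_VALID(h)	(((h)->csum_info >> 7) & 0x01)
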
diff --git a/Documentation/networking/net_failover.rst b/Documentation/networking/net_failover.rst
index f4e1b4e07adc..2f776e90d318 100644
--- a/Documentation/networking/net_failover.rst
+++ b/Documentation/networking/net_failover.rst
@@ -96,9 +96,8 @@ needed to these network configuration daemons to make sure that an IP is
received only on the 'failover' device.
Below is the patch snippet used with 'cloud-ifupdown-helper' script found on
-Debian cloud images:
+Debian cloud images::
-::
@@ -27,6 +27,8 @@ do_setup() {
local working="$cfgdir/.$INTERFACE"
local final="$cfgdir/$INTERFACE"
@@ -172,9 +171,8 @@ appropriate FDB entry is added.
The following script is executed on the destination hypervisor once migration
completes, and it reattaches the VF to the VM and brings down the virtio-net
-interface.
+interface::
-::
# reattach-vf.sh
#!/bin/bash
diff --git a/MAINTAINERS b/MAINTAINERS
index 545a4776795e..9d8aa101f689 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1997,6 +1997,10 @@ F: include/uapi/linux/if_arcnet.h
ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
M: Arnd Bergmann <arnd@arndb.de>
+M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Alexandre Belloni <alexandre.belloni@bootlin.com>
+M: Linus Walleij <linus.walleij@linaro.org>
+R: Drew Fustini <fustini@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: soc@lists.linux.dev
S: Maintained
@@ -2437,6 +2441,7 @@ F: Documentation/devicetree/bindings/power/reset/apple,smc-reboot.yaml
F: Documentation/devicetree/bindings/pwm/apple,s5l-fpwm.yaml
F: Documentation/devicetree/bindings/spi/apple,spi.yaml
F: Documentation/devicetree/bindings/spmi/apple,spmi.yaml
+F: Documentation/devicetree/bindings/usb/apple,dwc3.yaml
F: Documentation/devicetree/bindings/watchdog/apple,wdt.yaml
F: arch/arm64/boot/dts/apple/
F: drivers/bluetooth/hci_bcm4377.c
@@ -2461,6 +2466,7 @@ F: drivers/pwm/pwm-apple.c
F: drivers/soc/apple/*
F: drivers/spi/spi-apple.c
F: drivers/spmi/spmi-apple-controller.c
+F: drivers/usb/dwc3/dwc3-apple.c
F: drivers/video/backlight/apple_dwi_bl.c
F: drivers/watchdog/apple_wdt.c
F: include/dt-bindings/interrupt-controller/apple-aic.h
@@ -3841,6 +3847,7 @@ F: drivers/hwmon/asus-ec-sensors.c
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
M: Corentin Chary <corentin.chary@gmail.com>
M: Luke D. Jones <luke@ljones.dev>
+M: Denis Benato <benato.denis96@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
W: https://asus-linux.org/
@@ -13111,6 +13118,15 @@ F: include/uapi/linux/io_uring.h
F: include/uapi/linux/io_uring/
F: io_uring/
+IO_URING ZCRX
+M: Pavel Begunkov <asml.silence@gmail.com>
+L: io-uring@vger.kernel.org
+L: netdev@vger.kernel.org
+T: git https://github.com/isilence/linux.git zcrx/for-next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git
+S: Maintained
+F: io_uring/zcrx.*
+
IPMI SUBSYSTEM
M: Corey Minyard <corey@minyard.net>
L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -14394,6 +14410,7 @@ F: tools/memory-model/
LINUX-NEXT TREE
M: Stephen Rothwell <sfr@canb.auug.org.au>
+M: Mark Brown <broonie@kernel.org>
L: linux-next@vger.kernel.org
S: Supported
B: mailto:linux-next@vger.kernel.org and the appropriate development tree
@@ -26885,7 +26902,7 @@ S: Maintained
F: drivers/vfio/cdx/*
VFIO DRIVER
-M: Alex Williamson <alex.williamson@redhat.com>
+M: Alex Williamson <alex@shazbot.org>
L: kvm@vger.kernel.org
S: Maintained
T: git https://github.com/awilliam/linux-vfio.git
@@ -27048,7 +27065,7 @@ T: git git://linuxtv.org/media.git
F: drivers/media/test-drivers/vimc/*
VIRT LIB
-M: Alex Williamson <alex.williamson@redhat.com>
+M: Alex Williamson <alex@shazbot.org>
M: Paolo Bonzini <pbonzini@redhat.com>
L: kvm@vger.kernel.org
S: Supported
diff --git a/Makefile b/Makefile
index d14824792227..b34a1f4c0396 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
index c78ed064d166..1eb6406449d1 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
@@ -77,6 +77,14 @@
/delete-property/ pinctrl-0;
};
+&pm {
+ clocks = <&firmware_clocks 5>,
+ <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+ <&clocks BCM2835_CLOCK_H264>,
+ <&clocks BCM2835_CLOCK_ISP>;
+ clock-names = "v3d", "peri_image", "h264", "isp";
+};
+
&rmem {
/*
* RPi4's co-processor will copy the board's bootloader configuration
diff --git a/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi b/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
index 8b3c21d9f333..fa9d784c88b6 100644
--- a/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
@@ -13,7 +13,16 @@
clock-names = "pixel", "hdmi";
};
+&pm {
+ clocks = <&firmware_clocks 5>,
+ <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+ <&clocks BCM2835_CLOCK_H264>,
+ <&clocks BCM2835_CLOCK_ISP>;
+ clock-names = "v3d", "peri_image", "h264", "isp";
+};
+
&v3d {
+ clocks = <&firmware_clocks 5>;
power-domains = <&power RPI_POWER_DOMAIN_V3D>;
};
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
index e77a66adc22a..205b87f557d6 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
@@ -326,6 +326,8 @@
<0x7fffe000 0x2000>;
interrupt-controller;
#address-cells = <0>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
+ IRQ_TYPE_LEVEL_HIGH)>;
#interrupt-cells = <3>;
};
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index aa89c2e67ebc..0944e296dd4a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -293,7 +293,8 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
- pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+ if (pte_sw_dirty(pte))
+ pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index a86c897017df..cd5912ba617b 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -35,7 +35,7 @@ void copy_highpage(struct page *to, struct page *from)
from != folio_page(src, 0))
return;
- WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));
+ folio_try_hugetlb_mte_tagging(dst);
/*
* Populate tags for all subpages.
@@ -51,8 +51,13 @@ void copy_highpage(struct page *to, struct page *from)
}
folio_set_hugetlb_mte_tagged(dst);
} else if (page_mte_tagged(from)) {
- /* It's a new page, shouldn't have been tagged yet */
- WARN_ON_ONCE(!try_page_mte_tagging(to));
+ /*
+ * Most of the time it's a new page that shouldn't have been
+ * tagged yet. However, folio migration can end up reusing the
+ * same page without untagging it. Ignore the warning if the
+ * page is already tagged.
+ */
+ try_page_mte_tagging(to);
mte_copy_page_tags(kto, kfrom);
set_page_mte_tagged(to);
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index 876028b1083f..064b0f0f95ca 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -21,7 +21,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
folio = page_folio(pfn_to_page(pfn));
- if (test_and_set_bit(PG_dcache_clean, &folio->flags))
+ if (test_and_set_bit(PG_dcache_clean, &folio->flags.f))
return;
icache_inv_range(address, address + nr*PAGE_SIZE);
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index 6513ac5d2578..da51a0f02391 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -20,8 +20,8 @@
static inline void flush_dcache_folio(struct folio *folio)
{
- if (test_bit(PG_dcache_clean, &folio->flags))
- clear_bit(PG_dcache_clean, &folio->flags);
+ if (test_bit(PG_dcache_clean, &folio->flags.f))
+ clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 3a2836e9d856..816570514c37 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -47,7 +47,7 @@ static struct resource standard_io_resources[] = {
.name = "keyboard",
.start = 0x60,
.end = 0x6f,
- .flags = IORESOURCE_IO | IORESOURCE_BUSY
+ .flags = IORESOURCE_IO
},
{
.name = "dma page reg",
@@ -213,7 +213,7 @@ void __init plat_mem_setup(void)
/* Request I/O space for devices used on the Malta board. */
for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
- request_resource(&ioport_resource, standard_io_resources+i);
+ insert_resource(&ioport_resource, standard_io_resources + i);
/*
* Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge.
diff --git a/arch/mips/pci/pci-malta.c b/arch/mips/pci/pci-malta.c
index 6aefdf20ca05..2e35aeba45bc 100644
--- a/arch/mips/pci/pci-malta.c
+++ b/arch/mips/pci/pci-malta.c
@@ -230,8 +230,7 @@ void __init mips_pcibios_init(void)
}
/* PIIX4 ACPI starts at 0x1000 */
- if (controller->io_resource->start < 0x00001000UL)
- controller->io_resource->start = 0x00001000UL;
+ PCIBIOS_MIN_IO = 0x1000;
iomem_resource.end &= 0xfffffffffULL; /* 64 GB */
ioport_resource.end = controller->io_resource->end;
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 8bd2a11382a3..ac28066bb564 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -84,15 +84,9 @@
.endm
#ifdef CONFIG_SMP
-#ifdef CONFIG_32BIT
-#define PER_CPU_OFFSET_SHIFT 2
-#else
-#define PER_CPU_OFFSET_SHIFT 3
-#endif
-
.macro asm_per_cpu dst sym tmp
lw \tmp, TASK_TI_CPU_NUM(tp)
- slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+ slli \tmp, \tmp, RISCV_LGPTR
la \dst, __per_cpu_offset
add \dst, \dst, \tmp
REG_L \tmp, 0(\dst)
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index fbd0e4306c93..62837fa981e8 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -31,6 +31,8 @@ struct riscv_isainfo {
DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+extern const struct seq_operations cpuinfo_op;
+
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 948d2b34e94e..58f8dda73259 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -42,4 +42,11 @@ static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
return pair->value == other_pair->value;
}
+#ifdef CONFIG_MMU
+void riscv_hwprobe_register_async_probe(void);
+void riscv_hwprobe_complete_async_probe(void);
+#else
+static inline void riscv_hwprobe_register_async_probe(void) {}
+static inline void riscv_hwprobe_complete_async_probe(void) {}
+#endif
#endif
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 1018d2216901..6e789fa58514 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -69,6 +69,8 @@ typedef struct {
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
+#define MAX_POSSIBLE_PHYSMEM_BITS 56
+
/*
* rv64 PTE format:
* | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 29e994a9afb6..5a08eb5fe99f 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -654,6 +654,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
return __pgprot(prot);
}
+#define pgprot_dmacoherent pgprot_writecombine
+
/*
* Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
* default the M-mode firmware enables the hardware updating scheme when only Svadu is present in
diff --git a/arch/riscv/include/asm/vdso/arch_data.h b/arch/riscv/include/asm/vdso/arch_data.h
index da57a3786f7a..88b37af55175 100644
--- a/arch/riscv/include/asm/vdso/arch_data.h
+++ b/arch/riscv/include/asm/vdso/arch_data.h
@@ -12,6 +12,12 @@ struct vdso_arch_data {
/* Boolean indicating all CPUs have the same static hwprobe values. */
__u8 homogeneous_cpus;
+
+ /*
+ * A gate to check and see if the hwprobe data is actually ready, as
+ * probing is deferred to avoid boot slowdowns.
+ */
+ __u8 ready;
};
#endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index f6b13e9f5e6c..3dbc8cc557dd 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -62,10 +62,8 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo
return -ENODEV;
}
- if (!of_device_is_available(node)) {
- pr_info("CPU with hartid=%lu is not available\n", *hart);
+ if (!of_device_is_available(node))
return -ENODEV;
- }
if (of_property_read_string(node, "riscv,isa-base", &isa))
goto old_interface;
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 67b59699357d..72ca768f4e91 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -932,9 +932,9 @@ static int has_thead_homogeneous_vlenb(void)
{
int cpu;
u32 prev_vlenb = 0;
- u32 vlenb;
+ u32 vlenb = 0;
- /* Ignore thead,vlenb property if xtheavector is not enabled in the kernel */
+ /* Ignore thead,vlenb property if xtheadvector is not enabled in the kernel */
if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
return 0;
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index e650dec44817..5ed5095320e6 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -40,6 +40,17 @@ enum ipi_message_type {
IPI_MAX
};
+static const char * const ipi_names[] = {
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_CPU_STOP] = "CPU stop interrupts",
+ [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
+ [IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_TIMER] = "Timer broadcast interrupts",
+ [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
+ [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
+};
+
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
[0 ... NR_CPUS-1] = INVALID_HARTID
};
@@ -199,7 +210,7 @@ void riscv_ipi_set_virq_range(int virq, int nr)
/* Request IPIs */
for (i = 0; i < nr_ipi; i++) {
err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
- "IPI", &ipi_dummy_dev);
+ ipi_names[i], &ipi_dummy_dev);
WARN_ON(err);
ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
@@ -210,17 +221,6 @@ void riscv_ipi_set_virq_range(int virq, int nr)
riscv_ipi_enable();
}
-static const char * const ipi_names[] = {
- [IPI_RESCHEDULE] = "Rescheduling interrupts",
- [IPI_CALL_FUNC] = "Function call interrupts",
- [IPI_CPU_STOP] = "CPU stop interrupts",
- [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
- [IPI_IRQ_WORK] = "IRQ work interrupts",
- [IPI_TIMER] = "Timer broadcast interrupts",
- [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
- [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts",
-};
-
void show_ipi_stats(struct seq_file *p, int prec)
{
unsigned int cpu, i;
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 000f4451a9d8..199d13f86f31 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -5,6 +5,9 @@
* more details.
*/
#include <linux/syscalls.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/once.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
@@ -28,6 +31,11 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
bool first = true;
int cpu;
+ if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
+ pair->key != RISCV_HWPROBE_KEY_MIMPID &&
+ pair->key != RISCV_HWPROBE_KEY_MARCHID)
+ goto out;
+
for_each_cpu(cpu, cpus) {
u64 cpu_id;
@@ -58,6 +66,7 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
}
}
+out:
pair->value = id;
}
@@ -454,28 +463,32 @@ static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
return 0;
}
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
- size_t pair_count, size_t cpusetsize,
- unsigned long __user *cpus_user,
- unsigned int flags)
-{
- if (flags & RISCV_HWPROBE_WHICH_CPUS)
- return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
- cpus_user, flags);
+#ifdef CONFIG_MMU
- return hwprobe_get_values(pairs, pair_count, cpusetsize,
- cpus_user, flags);
+static DECLARE_COMPLETION(boot_probes_done);
+static atomic_t pending_boot_probes = ATOMIC_INIT(1);
+
+void riscv_hwprobe_register_async_probe(void)
+{
+ atomic_inc(&pending_boot_probes);
}
-#ifdef CONFIG_MMU
+void riscv_hwprobe_complete_async_probe(void)
+{
+ if (atomic_dec_and_test(&pending_boot_probes))
+ complete(&boot_probes_done);
+}
-static int __init init_hwprobe_vdso_data(void)
+static int complete_hwprobe_vdso_data(void)
{
struct vdso_arch_data *avd = vdso_k_arch_data;
u64 id_bitsmash = 0;
struct riscv_hwprobe pair;
int key;
+ if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
+ wait_for_completion(&boot_probes_done);
+
/*
* Initialize vDSO data with the answers for the "all CPUs" case, to
* save a syscall in the common case.
@@ -503,13 +516,52 @@ static int __init init_hwprobe_vdso_data(void)
* vDSO should defer to the kernel for exotic cpu masks.
*/
avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+
+ /*
+ * Make sure all the VDSO values are visible before we look at them.
+	 * This pairs with the implicit "no speculatively visible accesses"
+ * barrier in the VDSO hwprobe code.
+ */
+ smp_wmb();
+ avd->ready = true;
+ return 0;
+}
+
+static int __init init_hwprobe_vdso_data(void)
+{
+ struct vdso_arch_data *avd = vdso_k_arch_data;
+
+ /*
+ * Prevent the vDSO cached values from being used, as they're not ready
+ * yet.
+ */
+ avd->ready = false;
return 0;
}
arch_initcall_sync(init_hwprobe_vdso_data);
+#else
+
+static int complete_hwprobe_vdso_data(void) { return 0; }
+
#endif /* CONFIG_MMU */
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+ size_t pair_count, size_t cpusetsize,
+ unsigned long __user *cpus_user,
+ unsigned int flags)
+{
+ DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);
+
+ if (flags & RISCV_HWPROBE_WHICH_CPUS)
+ return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+ cpus_user, flags);
+
+ return hwprobe_get_values(pairs, pair_count, cpusetsize,
+ cpus_user, flags);
+}
+
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
cpus, unsigned int, flags)
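
The MMU branch above gates the first hwprobe syscall (and the vDSO data) on any asynchronous boot-time probing having finished: pending_boot_probes starts at 1, async probers increment and decrement it, and the consumer drops that initial reference and sleeps on the completion only if probes are still outstanding. A minimal standalone sketch of that counter-plus-completion idiom, using hypothetical names (pending_work, work_done) rather than the driver's symbols:

/*
 * Sketch of the "counter initialised to 1 + completion" gating idiom.
 * Producers bracket their async work with register/complete; the consumer
 * drops the initial reference and sleeps only if work is still pending.
 */
#include <linux/atomic.h>
#include <linux/completion.h>

static DECLARE_COMPLETION(work_done);
static atomic_t pending_work = ATOMIC_INIT(1);	/* 1 = consumer's own reference */

static void register_async_work(void)
{
	atomic_inc(&pending_work);
}

static void complete_async_work(void)
{
	if (atomic_dec_and_test(&pending_work))
		complete(&work_done);
}

static void wait_for_async_work(void)
{
	/* Drop the initial reference; wait only if producers remain. */
	if (!atomic_dec_and_test(&pending_work))
		wait_for_completion(&work_done);
}
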
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index ae2068425fbc..70b5e6927620 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -379,6 +379,7 @@ free:
static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
{
schedule_on_each_cpu(check_vector_unaligned_access);
+ riscv_hwprobe_complete_async_probe();
return 0;
}
@@ -473,8 +474,12 @@ static int __init check_unaligned_access_all_cpus(void)
per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
} else if (!check_vector_unaligned_access_emulated_all_cpus() &&
IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
- kthread_run(vec_check_unaligned_access_speed_all_cpus,
- NULL, "vec_check_unaligned_access_speed_all_cpus");
+ riscv_hwprobe_register_async_probe();
+ if (IS_ERR(kthread_run(vec_check_unaligned_access_speed_all_cpus,
+ NULL, "vec_check_unaligned_access_speed_all_cpus"))) {
+ pr_warn("Failed to create vec_unalign_check kthread\n");
+ riscv_hwprobe_complete_async_probe();
+ }
}
/*
diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
index 2ddeba6c68dd..8f45500d0a6e 100644
--- a/arch/riscv/kernel/vdso/hwprobe.c
+++ b/arch/riscv/kernel/vdso/hwprobe.c
@@ -27,7 +27,7 @@ static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
* homogeneous, then this function can handle requests for arbitrary
* masks.
*/
- if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
+ if (flags != 0 || (!all_cpus && !avd->homogeneous_cpus) || unlikely(!avd->ready))
return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
/* This is something we can handle, fill out the pairs. */
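
The extra !avd->ready test makes the vDSO fall back to the syscall until the kernel has published the cached values. The write side above stores the values, issues smp_wmb(), then sets ready; per its comment, the read side here relies on an implicit barrier. The classical explicit pairing looks like this sketch (data and ready are illustrative variables, not the vDSO layout):

#include <asm/barrier.h>
#include <linux/compiler.h>

static int data;
static int ready;

static void publish(void)
{
	WRITE_ONCE(data, 42);
	smp_wmb();		/* order the data store before the flag store */
	WRITE_ONCE(ready, 1);
}

static int consume(void)
{
	if (!READ_ONCE(ready))
		return -1;	/* not published yet: fall back (here, to the syscall) */
	smp_rmb();		/* order the flag load before the data loads */
	return READ_ONCE(data);
}
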
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6a526ae1fe99..d7fa03bf51b4 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1463,7 +1463,9 @@ static void __init retbleed_update_mitigation(void)
break;
default:
if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) {
- pr_err(RETBLEED_INTEL_MSG);
+ if (retbleed_mitigation != RETBLEED_MITIGATION_NONE)
+ pr_err(RETBLEED_INTEL_MSG);
+
retbleed_mitigation = RETBLEED_MITIGATION_NONE;
}
}
@@ -1825,13 +1827,6 @@ void unpriv_ebpf_notify(int new_state)
}
#endif
-static inline bool match_option(const char *arg, int arglen, const char *opt)
-{
- int len = strlen(opt);
-
- return len == arglen && !strncmp(arg, opt, len);
-}
-
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
SPECTRE_V2_CMD_NONE,
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index cdce885e2fd5..28ed8c089024 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -194,7 +194,7 @@ static bool need_sha_check(u32 cur_rev)
}
switch (cur_rev >> 8) {
- case 0x80012: return cur_rev <= 0x800126f; break;
+ case 0x80012: return cur_rev <= 0x8001277; break;
case 0x80082: return cur_rev <= 0x800820f; break;
case 0x83010: return cur_rev <= 0x830107c; break;
case 0x86001: return cur_rev <= 0x860010e; break;
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 2cd25a0d4637..fe1a2aa53c16 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -458,7 +458,16 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
r->mon.mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
}
- if (rdt_cpu_has(X86_FEATURE_ABMC)) {
+ /*
+ * resctrl assumes a system that supports assignable counters can
+ * switch to "default" mode. Ensure that there is a "default" mode
+ * to switch to. This enforces a dependency between the independent
+ * X86_FEATURE_ABMC and X86_FEATURE_CQM_MBM_TOTAL/X86_FEATURE_CQM_MBM_LOCAL
+ * hardware features.
+ */
+ if (rdt_cpu_has(X86_FEATURE_ABMC) &&
+ (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL) ||
+ rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))) {
r->mon.mbm_cntr_assignable = true;
cpuid_count(0x80000020, 5, &eax, &ebx, &ecx, &edx);
r->mon.num_mbm_cntrs = (ebx & GENMASK(15, 0)) + 1;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 54cffaae4df4..d74b13ec8e54 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -184,6 +184,16 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
if (!bi->interval_exp)
bi->interval_exp = ilog2(lim->logical_block_size);
+ /*
+ * The PI generation / validation helpers do not expect intervals to
+ * straddle multiple bio_vecs. Enforce alignment so that those are
+ * never generated, and that each buffer is aligned as expected.
+ */
+ if (bi->csum_type) {
+ lim->dma_alignment = max(lim->dma_alignment,
+ (1U << bi->interval_exp) - 1);
+ }
+
return 0;
}
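
Because dma_alignment is a bit mask of required-zero bits, (1U << interval_exp) - 1 forces every data buffer to start on a protection-interval boundary, so one PI interval can never straddle two bio_vecs. For example, with a 4096-byte interval (interval_exp == 12) the mask is 0xfff, i.e. 4 KiB alignment. A tiny standalone illustration of the arithmetic (values are made up):

/* Illustration only: a dma_alignment of (1 << interval_exp) - 1 means
 * "the low interval_exp address bits must be zero". */
#include <stdio.h>

int main(void)
{
	unsigned int interval_exp = 12;			/* 4096-byte PI interval */
	unsigned int mask = (1U << interval_exp) - 1;	/* 0xfff */
	unsigned long addr = 0x10003000UL;

	printf("mask=0x%x aligned=%d\n", mask, (addr & mask) == 0);
	return 0;
}
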
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 049f6c2f1e32..e5631027f7f1 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
+#pragma GCC diagnostic push
+#if defined(__GNUC__) && __GNUC__ >= 11
+#pragma GCC diagnostic ignored "-Wstringop-overread"
+#endif
+
if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -143,4 +148,5 @@ acpi_tb_print_table_header(acpi_physical_address address,
local_header.asl_compiler_id,
local_header.asl_compiler_revision));
}
+#pragma GCC diagnostic pop
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 1b997a5497e7..43d5e457814e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1107,7 +1107,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
size_t num_args,
struct fwnode_reference_args *args)
{
- return acpi_fwnode_get_reference_args(fwnode, propname, NULL, index, num_args, args);
+ return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args);
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
index 683fcfe35c31..7f423405e5ef 100644
--- a/drivers/acpi/riscv/rimt.c
+++ b/drivers/acpi/riscv/rimt.c
@@ -61,30 +61,6 @@ static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
return 0;
}
-/**
- * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
- *
- * @node: RIMT table node to be looked-up
- *
- * Returns: fwnode_handle pointer on success, NULL on failure
- */
-static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
-{
- struct fwnode_handle *fwnode = NULL;
- struct rimt_fwnode *curr;
-
- spin_lock(&rimt_fwnode_lock);
- list_for_each_entry(curr, &rimt_fwnode_list, list) {
- if (curr->rimt_node == node) {
- fwnode = curr->fwnode;
- break;
- }
- }
- spin_unlock(&rimt_fwnode_lock);
-
- return fwnode;
-}
-
static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
void *context)
{
@@ -202,6 +178,67 @@ static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
return NULL;
}
+/*
+ * RISC-V supports IOMMU as a PCI device or a platform device.
+ * When it is a platform device, there should be a namespace device as
+ * well along with RIMT. To create the link between RIMT information and
+ * the platform device, the IOMMU driver should register itself with the
+ * RIMT module. This is true for PCI based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+ struct fwnode_handle *rimt_fwnode;
+ struct acpi_rimt_node *node;
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+ if (!node) {
+ pr_err("Could not find IOMMU node in RIMT\n");
+ return -ENODEV;
+ }
+
+ if (dev_is_pci(dev)) {
+ rimt_fwnode = acpi_alloc_fwnode_static();
+ if (!rimt_fwnode)
+ return -ENOMEM;
+
+ rimt_fwnode->dev = dev;
+ if (!dev->fwnode)
+ dev->fwnode = rimt_fwnode;
+
+ rimt_set_fwnode(node, rimt_fwnode);
+ } else {
+ rimt_set_fwnode(node, dev->fwnode);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct rimt_fwnode *curr;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_for_each_entry(curr, &rimt_fwnode_list, list) {
+ if (curr->rimt_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&rimt_fwnode_lock);
+
+ return fwnode;
+}
+
static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
{
struct acpi_rimt_pcie_rc *pci_rc;
@@ -290,43 +327,6 @@ static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
return NULL;
}
-/*
- * RISC-V supports IOMMU as a PCI device or a platform device.
- * When it is a platform device, there should be a namespace device as
- * well along with RIMT. To create the link between RIMT information and
- * the platform device, the IOMMU driver should register itself with the
- * RIMT module. This is true for PCI based IOMMU as well.
- */
-int rimt_iommu_register(struct device *dev)
-{
- struct fwnode_handle *rimt_fwnode;
- struct acpi_rimt_node *node;
-
- node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
- if (!node) {
- pr_err("Could not find IOMMU node in RIMT\n");
- return -ENODEV;
- }
-
- if (dev_is_pci(dev)) {
- rimt_fwnode = acpi_alloc_fwnode_static();
- if (!rimt_fwnode)
- return -ENOMEM;
-
- rimt_fwnode->dev = dev;
- if (!dev->fwnode)
- dev->fwnode = rimt_fwnode;
-
- rimt_set_fwnode(node, rimt_fwnode);
- } else {
- rimt_set_fwnode(node, dev->fwnode);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_IOMMU_API
-
static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
u32 id_in, u32 *id_out,
u8 type_mask)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8c99ceaa303b..a3a1b5c33ba3 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -851,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else {
if (!internal)
node->local_weak_refs++;
- if (!node->has_weak_ref && list_empty(&node->work.entry)) {
- if (target_list == NULL) {
- pr_err("invalid inc weak node for %d\n",
- node->debug_id);
- return -EINVAL;
- }
- /*
- * See comment above
- */
+ if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
binder_enqueue_work_ilocked(&node->work, target_list);
- }
}
return 0;
}
@@ -2418,10 +2409,10 @@ err_fd_not_accepted:
/**
* struct binder_ptr_fixup - data to be fixed-up in target buffer
- * @offset offset in target buffer to fixup
- * @skip_size bytes to skip in copy (fixup will be written later)
- * @fixup_data data to write at fixup offset
- * @node list node
+ * @offset: offset in target buffer to fixup
+ * @skip_size: bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node: list node
*
* This is used for the pointer fixup list (pf) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2438,10 +2429,10 @@ struct binder_ptr_fixup {
/**
* struct binder_sg_copy - scatter-gather data to be copied
- * @offset offset in target buffer
- * @sender_uaddr user address in source buffer
- * @length bytes to copy
- * @node list node
+ * @offset: offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length: bytes to copy
+ * @node: list node
*
* This is used for the sg copy list (sgc) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -4063,14 +4054,15 @@ binder_freeze_notification_done(struct binder_proc *proc,
/**
* binder_free_buf() - free the specified buffer
- * @proc: binder proc that owns buffer
- * @buffer: buffer to be freed
- * @is_failure: failed to send transaction
+ * @proc: binder proc that owns buffer
+ * @thread: binder thread performing the buffer release
+ * @buffer: buffer to be freed
+ * @is_failure: failed to send transaction
*
- * If buffer for an async transaction, enqueue the next async
+ * If the buffer is for an async transaction, enqueue the next async
* transaction from the node.
*
- * Cleanup buffer and free it.
+ * Cleanup the buffer and free it.
*/
static void
binder_free_buf(struct binder_proc *proc,
diff --git a/drivers/android/binder/freeze.rs b/drivers/android/binder/freeze.rs
index e68c3c8bc55a..220de35ae85a 100644
--- a/drivers/android/binder/freeze.rs
+++ b/drivers/android/binder/freeze.rs
@@ -106,13 +106,22 @@ impl DeliverToRead for FreezeMessage {
return Ok(true);
}
if freeze.is_clearing {
- _removed_listener = freeze_entry.remove_node();
+ kernel::warn_on!(freeze.num_cleared_duplicates != 0);
+ if freeze.num_pending_duplicates > 0 {
+ // The primary freeze listener was deleted, so convert a pending duplicate back
+ // into the primary one.
+ freeze.num_pending_duplicates -= 1;
+ freeze.is_pending = true;
+ freeze.is_clearing = true;
+ } else {
+ _removed_listener = freeze_entry.remove_node();
+ }
drop(node_refs);
writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
writer.write_payload(&self.cookie.0)?;
Ok(true)
} else {
- let is_frozen = freeze.node.owner.inner.lock().is_frozen;
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
if freeze.last_is_frozen == Some(is_frozen) {
return Ok(true);
}
@@ -245,8 +254,9 @@ impl Process {
);
return Err(EINVAL);
}
- if freeze.is_clearing {
- // Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE.
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+ if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) {
+ // Immediately send another FreezeMessage.
clear_msg = Some(FreezeMessage::init(alloc, cookie));
}
freeze.is_pending = false;
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
index ade895ef791e..08d362deaf61 100644
--- a/drivers/android/binder/node.rs
+++ b/drivers/android/binder/node.rs
@@ -687,7 +687,7 @@ impl Node {
);
}
if inner.freeze_list.is_empty() {
- _unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
+ _unused_capacity = mem::take(&mut inner.freeze_list);
}
}
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index f13a747e784c..7607353a5e92 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -72,6 +72,33 @@ impl Mapping {
const PROC_DEFER_FLUSH: u8 = 1;
const PROC_DEFER_RELEASE: u8 = 2;
+#[derive(Copy, Clone)]
+pub(crate) enum IsFrozen {
+ Yes,
+ No,
+ InProgress,
+}
+
+impl IsFrozen {
+ /// Whether incoming transactions should be rejected due to freeze.
+ pub(crate) fn is_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => true,
+ }
+ }
+
+ /// Whether freeze notifications consider this process frozen.
+ pub(crate) fn is_fully_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => false,
+ }
+ }
+}
+
/// The fields of `Process` protected by the spinlock.
pub(crate) struct ProcessInner {
is_manager: bool,
@@ -98,7 +125,7 @@ pub(crate) struct ProcessInner {
/// are woken up.
outstanding_txns: u32,
/// Process is frozen and unable to service binder transactions.
- pub(crate) is_frozen: bool,
+ pub(crate) is_frozen: IsFrozen,
/// Process received sync transactions since last frozen.
pub(crate) sync_recv: bool,
/// Process received async transactions since last frozen.
@@ -124,7 +151,7 @@ impl ProcessInner {
started_thread_count: 0,
defer_work: 0,
outstanding_txns: 0,
- is_frozen: false,
+ is_frozen: IsFrozen::No,
sync_recv: false,
async_recv: false,
binderfs_file: None,
@@ -1260,7 +1287,7 @@ impl Process {
let is_manager = {
let mut inner = self.inner.lock();
inner.is_dead = true;
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
inner.sync_recv = false;
inner.async_recv = false;
inner.is_manager
@@ -1346,10 +1373,6 @@ impl Process {
.alloc
.take_for_each(|offset, size, debug_id, odata| {
let ptr = offset + address;
- pr_warn!(
- "{}: removing orphan mapping {offset}:{size}\n",
- self.pid_in_current_ns()
- );
let mut alloc =
Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
if let Some(data) = odata {
@@ -1371,7 +1394,7 @@ impl Process {
return;
}
inner.outstanding_txns -= 1;
- inner.is_frozen && inner.outstanding_txns == 0
+ inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
};
if wake {
@@ -1385,7 +1408,7 @@ impl Process {
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
drop(inner);
msgs.send_messages();
return Ok(());
@@ -1394,7 +1417,7 @@ impl Process {
let mut inner = self.inner.lock();
inner.sync_recv = false;
inner.async_recv = false;
- inner.is_frozen = true;
+ inner.is_frozen = IsFrozen::InProgress;
if info.timeout_ms > 0 {
let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
@@ -1408,7 +1431,7 @@ impl Process {
.wait_interruptible_timeout(&mut inner, jiffies)
{
CondVarTimeoutResult::Signal { .. } => {
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
return Err(ERESTARTSYS);
}
CondVarTimeoutResult::Woken { jiffies: remaining } => {
@@ -1422,17 +1445,18 @@ impl Process {
}
if inner.txns_pending_locked() {
- inner.is_frozen = false;
+ inner.is_frozen = IsFrozen::No;
Err(EAGAIN)
} else {
drop(inner);
match self.prepare_freeze_messages() {
Ok(batch) => {
+ self.inner.lock().is_frozen = IsFrozen::Yes;
batch.send_messages();
Ok(())
}
Err(kernel::alloc::AllocError) => {
- self.inner.lock().is_frozen = false;
+ self.inner.lock().is_frozen = IsFrozen::No;
Err(ENOMEM)
}
}
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
index 02512175d622..4bd3c0e417eb 100644
--- a/drivers/android/binder/transaction.rs
+++ b/drivers/android/binder/transaction.rs
@@ -249,7 +249,7 @@ impl Transaction {
if oneway {
if let Some(target_node) = self.target_node.clone() {
- if process_inner.is_frozen {
+ if process_inner.is_frozen.is_frozen() {
process_inner.async_recv = true;
if self.flags & TF_UPDATE_TXN != 0 {
if let Some(t_outdated) =
@@ -270,7 +270,7 @@ impl Transaction {
}
}
- if process_inner.is_frozen {
+ if process_inner.is_frozen.is_frozen() {
return Err(BinderError::new_frozen_oneway());
} else {
return Ok(());
@@ -280,7 +280,7 @@ impl Transaction {
}
}
- if process_inner.is_frozen {
+ if process_inner.is_frozen.is_frozen() {
process_inner.sync_recv = true;
return Err(BinderError::new_frozen());
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb45..e1eff05bea4a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
* frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
- if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+ if (!IS_ERR_OR_NULL(cpu_clk)) {
per_cpu(capacity_freq_ref, cpu) =
clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
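
The one-line fix matters because PTR_ERR_OR_ZERO() maps both a valid pointer and NULL to 0, so the old !PTR_ERR_OR_ZERO(cpu_clk) check also accepted a NULL clock; IS_ERR_OR_NULL() rejects error pointers and NULL alike. A short sketch of the distinction (clk_usable() is an illustrative helper, not a kernel API):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/types.h>

/* PTR_ERR_OR_ZERO(clk) is 0 for NULL *and* for a real clock, so it cannot
 * answer "do I have a usable clock". IS_ERR_OR_NULL() can. */
static bool clk_usable(struct clk *clk)
{
	return !IS_ERR_OR_NULL(clk);
}
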
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 3c533dab8fa5..f69dc9c85954 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
- dev_warn(sup, "sync_state() pending due to %s\n",
+ dev_info(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 37faf6156d7c..55bdc7f5e59d 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -23,50 +23,46 @@ struct devcd_entry {
void *data;
size_t datalen;
/*
- * Here, mutex is required to serialize the calls to del_wk work between
- * user/kernel space which happens when devcd is added with device_add()
- * and that sends uevent to user space. User space reads the uevents,
- * and calls to devcd_data_write() which try to modify the work which is
- * not even initialized/queued from devcoredump.
+ * There are 2 races for which mutex is required.
*
+ * The first race is between device creation and userspace writing to
+	 * the coredump to schedule its immediate destruction.
*
+ * This race is handled by arming the timer before device creation, but
+ * when device creation fails the timer still exists.
*
- * cpu0(X) cpu1(Y)
+ * To solve this, hold the mutex during device_add(), and set
+ * init_completed on success before releasing the mutex.
*
- * dev_coredump() uevent sent to user space
- * device_add() ======================> user space process Y reads the
- * uevents writes to devcd fd
- * which results into writes to
+	 * That way the work will never tear the device down before device_add()
+	 * succeeds: it does nothing if init_completed is not set. The timer is also
+ * cancelled in that case.
*
- * devcd_data_write()
- * mod_delayed_work()
- * try_to_grab_pending()
- * timer_delete()
- * debug_assert_init()
- * INIT_DELAYED_WORK()
- * schedule_delayed_work()
- *
- *
- * Also, mutex alone would not be enough to avoid scheduling of
- * del_wk work after it get flush from a call to devcd_free()
- * mentioned as below.
- *
- * disabled_store()
- * devcd_free()
- * mutex_lock() devcd_data_write()
- * flush_delayed_work()
- * mutex_unlock()
- * mutex_lock()
- * mod_delayed_work()
- * mutex_unlock()
- * So, delete_work flag is required.
+ * The second race involves multiple parallel invocations of devcd_free(),
+ * add a deleted flag so only 1 can call the destructor.
*/
struct mutex mutex;
- bool delete_work;
+ bool init_completed, deleted;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
void (*free)(void *data);
+ /*
+	 * If nothing interferes and device_add() returns success,
+ * del_wk will destroy the device after the timer fires.
+ *
+ * Multiple userspace processes can interfere in the working of the timer:
+ * - Writing to the coredump will reschedule the timer to run immediately,
+ * if still armed.
+ *
+ * This is handled by using "if (cancel_delayed_work()) {
+ * schedule_delayed_work() }", to prevent re-arming after having
+ * been previously fired.
+ * - Writing to /sys/class/devcoredump/disabled will destroy the
+ * coredump synchronously.
+ * This is handled by using disable_delayed_work_sync(), and then
+ * checking if deleted flag is set with &devcd->mutex held.
+ */
struct delayed_work del_wk;
struct device *failing_dev;
};
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
kfree(devcd);
}
+static void __devcd_del(struct devcd_entry *devcd)
+{
+ devcd->deleted = true;
+ device_del(&devcd->devcd_dev);
+ put_device(&devcd->devcd_dev);
+}
+
static void devcd_del(struct work_struct *wk)
{
struct devcd_entry *devcd;
+ bool init_completed;
devcd = container_of(wk, struct devcd_entry, del_wk.work);
- device_del(&devcd->devcd_dev);
- put_device(&devcd->devcd_dev);
+ /* devcd->mutex serializes against dev_coredumpm_timeout */
+ mutex_lock(&devcd->mutex);
+ init_completed = devcd->init_completed;
+ mutex_unlock(&devcd->mutex);
+
+ if (init_completed)
+ __devcd_del(devcd);
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mutex_lock(&devcd->mutex);
- if (!devcd->delete_work) {
- devcd->delete_work = true;
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
- }
- mutex_unlock(&devcd->mutex);
+ /*
+	 * Although it's tempting to use mod_delayed_work() here,
+ * that will cause a reschedule if the timer already fired.
+ */
+ if (cancel_delayed_work(&devcd->del_wk))
+ schedule_delayed_work(&devcd->del_wk, 0);
return count;
}
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ /*
+ * To prevent a race with devcd_data_write(), disable work and
+ * complete manually instead.
+ *
+ * We cannot rely on the return value of
+ * disable_delayed_work_sync() here, because it might be in the
+ * middle of a cancel_delayed_work + schedule_delayed_work pair.
+ *
+ * devcd->mutex here guards against multiple parallel invocations
+ * of devcd_free().
+ */
+ disable_delayed_work_sync(&devcd->del_wk);
mutex_lock(&devcd->mutex);
- if (!devcd->delete_work)
- devcd->delete_work = true;
-
- flush_delayed_work(&devcd->del_wk);
+ if (!devcd->deleted)
+ __devcd_del(devcd);
mutex_unlock(&devcd->mutex);
return 0;
}
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
- * mutex_lock(&devcd->mutex);
*
*
* In the above diagram, it looks like disabled_store() would be racing with parallelly
- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * running devcd_del() and result in memory abort after dropping its last reference with
* put_device(). However, this will not happens as fn(dev, data) runs
* with its own reference to device via klist_node so it is not its last reference.
* so, above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
- devcd->delete_work = false;
+ devcd->deleted = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
- mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+ /* devcd->mutex prevents devcd_del() completing until init finishes */
+ mutex_lock(&devcd->mutex);
+ devcd->init_completed = false;
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, timeout);
+
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
- INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
- schedule_delayed_work(&devcd->del_wk, timeout);
+
+ /*
+ * Safe to run devcd_del() now that we are done with devcd_dev.
+ * Alternatively we could have taken a ref on devcd_dev before
+ * dropping the lock.
+ */
+ devcd->init_completed = true;
mutex_unlock(&devcd->mutex);
return;
put_device:
- put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
+ cancel_delayed_work_sync(&devcd->del_wk);
+ put_device(&devcd->devcd_dev);
+
put_module:
module_put(owner);
free:
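
The reworked flow arms the deletion timer before device_add() and has the work handler bail out, under the mutex, until init_completed is set, so a racing userspace write can only ever tear down a fully initialised device. A condensed sketch of that shape with a hypothetical obj type and registration/destroy callbacks (not the devcoredump code itself):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct obj {
	struct mutex lock;
	bool init_completed;
	struct delayed_work del_wk;
};

static void obj_destroy(struct obj *o);		/* hypothetical destructor */
static int obj_register(struct obj *o);		/* hypothetical device_add() step */

static void obj_del(struct work_struct *wk)
{
	struct obj *o = container_of(wk, struct obj, del_wk.work);
	bool ready;

	/* The work may run before registration finished; only destroy once
	 * init_completed was set under the lock. */
	mutex_lock(&o->lock);
	ready = o->init_completed;
	mutex_unlock(&o->lock);

	if (ready)
		obj_destroy(o);
}

static int obj_create(struct obj *o, unsigned long timeout)
{
	mutex_init(&o->lock);

	/* Arm the timer first; the lock keeps obj_del() from completing
	 * until initialisation either succeeds or is unwound. */
	mutex_lock(&o->lock);
	o->init_completed = false;
	INIT_DELAYED_WORK(&o->del_wk, obj_del);
	schedule_delayed_work(&o->del_wk, timeout);

	if (obj_register(o)) {
		mutex_unlock(&o->lock);
		cancel_delayed_work_sync(&o->del_wk);
		return -ENODEV;
	}

	o->init_completed = true;
	mutex_unlock(&o->lock);
	return 0;
}
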
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1188f32a5e5e..a853c65ac65d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -52,6 +52,7 @@
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
+static struct cred *nbd_cred;
static int nbd_total_devices = 0;
struct nbd_sock {
@@ -554,6 +555,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
int result;
struct msghdr msg = {} ;
unsigned int noreclaim_flag;
+ const struct cred *old_cred;
if (unlikely(!sock)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -562,6 +564,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
return -EINVAL;
}
+ old_cred = override_creds(nbd_cred);
+
msg.msg_iter = *iter;
noreclaim_flag = memalloc_noreclaim_save();
@@ -586,6 +590,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
memalloc_noreclaim_restore(noreclaim_flag);
+ revert_creds(old_cred);
+
return result;
}
@@ -2677,7 +2683,15 @@ static int __init nbd_init(void)
return -ENOMEM;
}
+ nbd_cred = prepare_kernel_cred(&init_task);
+ if (!nbd_cred) {
+ destroy_workqueue(nbd_del_wq);
+ unregister_blkdev(NBD_MAJOR, "nbd");
+ return -ENOMEM;
+ }
+
if (genl_register_family(&nbd_genl_family)) {
+ put_cred(nbd_cred);
destroy_workqueue(nbd_del_wq);
unregister_blkdev(NBD_MAJOR, "nbd");
return -EINVAL;
@@ -2732,6 +2746,7 @@ static void __exit nbd_cleanup(void)
/* Also wait for nbd_dev_remove_work() completes */
destroy_workqueue(nbd_del_wq);
+ put_cred(nbd_cred);
idr_destroy(&nbd_index_idr);
unregister_blkdev(NBD_MAJOR, "nbd");
}
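
Queued nbd requests are transmitted from contexts whose credentials depend on which task happened to queue the I/O; overriding to a kernel credential prepared once at init makes the socket sends and receives independent of the submitter. A hedged sketch of the override/revert bracket (my_cred and do_socket_io() are placeholders, not nbd symbols):

#include <linux/cred.h>
#include <linux/sched.h>

static struct cred *my_cred;	/* e.g. prepare_kernel_cred(&init_task) at module init */

static int do_socket_io(void);	/* placeholder for the real send/receive */

static int do_io_with_kernel_creds(void)
{
	const struct cred *old_cred;
	int ret;

	old_cred = override_creds(my_cred);	/* temporarily adopt the kernel cred */
	ret = do_socket_io();
	revert_creds(old_cred);			/* restore the caller's credentials */

	return ret;
}
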
diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c
index 002c0e76baff..c7c262a2d8ca 100644
--- a/drivers/comedi/comedi_buf.c
+++ b/drivers/comedi/comedi_buf.c
@@ -317,7 +317,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
unsigned int count = 0;
const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
- if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
+ if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) {
async->munge_count += num_bytes;
return num_bytes;
}
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 298e92d8cc03..b44f0f7a5ba1 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1614,7 +1614,11 @@ static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
* min_perf value across kexec reboots. If this CPU is just onlined normally after this, the
* limits, epp and desired perf will get reset to the cached values in cpudata struct
*/
- return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ return amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
}
static int amd_pstate_suspend(struct cpufreq_policy *policy)
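
Rather than writing zeroes, the offline path now re-derives the desired, max and EPP fields from the cached request register with FIELD_GET(). The packing idiom in general looks like the sketch below; the masks are defined locally for illustration and are not the AMD CPPC definitions:

/* Sketch of packing/unpacking register fields with FIELD_PREP()/FIELD_GET().
 * The REQ_* masks are illustrative only. */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define REQ_MIN_PERF	GENMASK(7, 0)
#define REQ_MAX_PERF	GENMASK(15, 8)
#define REQ_EPP		GENMASK(31, 24)

static u32 pack_req(u8 min, u8 max, u8 epp)
{
	return FIELD_PREP(REQ_MIN_PERF, min) |
	       FIELD_PREP(REQ_MAX_PERF, max) |
	       FIELD_PREP(REQ_EPP, epp);
}

static u8 req_epp(u32 cached)
{
	return FIELD_GET(REQ_EPP, cached);	/* same idiom as the hunk above */
}
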
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 4d9aa5ce31f0..7d21fb5a72f4 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -188,20 +188,17 @@ again:
*
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
+ *
+ * However, if the number of remaining samples is too small to exclude
+ * any more outliers, allow the deepest available idle state to be
+ * selected because there are systems where the time spent by CPUs in
+ * deep idle states is correlated to the maximum frequency the CPUs
+ * can get to. On those systems, shallow idle states should be avoided
+	 * unless there is a clear indication that the given CPU is most likely
+ * going to be woken up shortly.
*/
- if (divisor * 4 <= INTERVALS * 3) {
- /*
- * If there are sufficiently many data points still under
- * consideration after the outliers have been eliminated,
- * returning without a prediction would be a mistake because it
- * is likely that the next interval will not exceed the current
- * maximum, so return the latter in that case.
- */
- if (divisor >= INTERVALS / 2)
- return max;
-
+ if (divisor * 4 <= INTERVALS * 3)
return UINT_MAX;
- }
/* Update the thresholds for the next round. */
if (avg - min > max - avg)
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index dd3656a0c1ff..c65f491c54d0 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -269,7 +269,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
}
static int allocate_tlabel(struct fw_card *card)
-__must_hold(&card->transactions_lock)
+__must_hold(&card->transactions.lock)
{
int tlabel;
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index 48b879e9e831..121f0c2f6401 100644
--- a/drivers/firewire/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -167,6 +167,7 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
/**
* init_ohci1394_wait_for_busresets - wait until bus resets are completed
+ * @ohci: Pointer to the OHCI-1394 controller structure
*
* OHCI1394 initialization itself and any device going on- or offline
* and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec
@@ -189,6 +190,8 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
/**
* init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This enables remote DMA access over IEEE1394 from every host for the low
* 4GB of address space. DMA accesses above 4GB are not available currently.
*/
@@ -201,6 +204,8 @@ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
/**
* init_ohci1394_reset_and_init_dma - init controller and enable DMA
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This initializes the given controller and enables physical DMA engine in it.
*/
static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
@@ -230,6 +235,10 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
/**
* init_ohci1394_controller - Map the registers of the controller and init DMA
+ * @num: PCI bus number
+ * @slot: PCI device number
+ * @func: PCI function number
+ *
* This maps the registers of the specified controller and initializes it
*/
static inline void __init init_ohci1394_controller(int num, int slot, int func)
@@ -284,6 +293,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
/**
* setup_ohci1394_dma - enables early OHCI1394 DMA initialization
+ * @opt: Kernel command line parameter string
*/
static int __init setup_ohci1394_dma(char *opt)
{
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 65bf1685350a..c72ee4756585 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -649,6 +649,26 @@ static u16 ffa_memory_attributes_get(u32 func_id)
return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}
+static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src)
+{
+ struct ffa_mem_region_attributes *ep_mem_access;
+
+ if (FFA_EMAD_HAS_IMPDEF_FIELD(version))
+ memcpy(dst, src, sizeof(ep_mem_access->impdef_val));
+}
+
+static void
+ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region)
+{
+ if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) {
+ mem_region->ep_mem_size = 0;
+ } else {
+ mem_region->ep_mem_size = ffa_emad_size_get(version);
+ mem_region->ep_mem_offset = sizeof(*mem_region);
+ memset(mem_region->reserved, 0, 12);
+ }
+}
+
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
struct ffa_mem_ops_args *args)
@@ -667,27 +687,24 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id;
mem_region->attributes = ffa_memory_attributes_get(func_id);
- ep_mem_access = buffer +
- ffa_mem_desc_offset(buffer, 0, drv_info->version);
composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
drv_info->version);
- for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+ for (idx = 0; idx < args->nattrs; idx++) {
+ ep_mem_access = buffer +
+ ffa_mem_desc_offset(buffer, idx, drv_info->version);
ep_mem_access->receiver = args->attrs[idx].receiver;
ep_mem_access->attrs = args->attrs[idx].attrs;
ep_mem_access->composite_off = composite_offset;
ep_mem_access->flag = 0;
ep_mem_access->reserved = 0;
+ ffa_emad_impdef_value_init(drv_info->version,
+ ep_mem_access->impdef_val,
+ args->attrs[idx].impdef_val);
}
mem_region->handle = 0;
mem_region->ep_count = args->nattrs;
- if (drv_info->version <= FFA_VERSION_1_0) {
- mem_region->ep_mem_size = 0;
- } else {
- mem_region->ep_mem_size = sizeof(*ep_mem_access);
- mem_region->ep_mem_offset = sizeof(*mem_region);
- memset(mem_region->reserved, 0, 12);
- }
+ ffa_mem_region_additional_setup(drv_info->version, mem_region);
composite = buffer + composite_offset;
composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
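
The loop rework exists because the endpoint descriptor size depends on the negotiated FF-A version, so advancing ep_mem_access with pointer arithmetic bakes in the wrong stride; recomputing the offset for each index keeps the walk version-correct. A generic sketch of iterating variable-size records by offset (rec_size() and rec_at() are illustrative helpers, not FF-A API):

#include <linux/string.h>
#include <linux/types.h>

static inline size_t rec_size(u32 version)
{
	return version >= 2 ? 32 : 16;		/* hypothetical per-version sizes */
}

static inline void *rec_at(void *base, size_t hdr, u32 version, unsigned int idx)
{
	/* Recompute each record's offset instead of "rec++" with a fixed size. */
	return (char *)base + hdr + idx * rec_size(version);
}

static void init_records(void *buf, size_t hdr, u32 version, unsigned int count)
{
	for (unsigned int i = 0; i < count; i++) {
		u8 *rec = rec_at(buf, hdr, version, i);

		memset(rec, 0, rec_size(version));	/* fill per-record fields here */
	}
}
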
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 07b9e629276d..7c35c95fddba 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -309,16 +309,36 @@ enum debug_counters {
SCMI_DEBUG_COUNTERS_LAST
};
-static inline void scmi_inc_count(atomic_t *arr, int stat)
+/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ * @counters: An array of atomic_t's used for tracking statistics (if enabled)
+ */
+struct scmi_debug_info {
+ struct dentry *top_dentry;
+ const char *name;
+ const char *type;
+ bool is_atomic;
+ atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
+};
+
+static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat)
{
- if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
- atomic_inc(&arr[stat]);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ if (dbg)
+ atomic_inc(&dbg->counters[stat]);
+ }
}
-static inline void scmi_dec_count(atomic_t *arr, int stat)
+static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat)
{
- if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
- atomic_dec(&arr[stat]);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ if (dbg)
+ atomic_dec(&dbg->counters[stat]);
+ }
}
enum scmi_bad_msg {
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index bd56a877fdfc..5caa9191a8d1 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -116,22 +116,6 @@ struct scmi_protocol_instance {
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
/**
- * struct scmi_debug_info - Debug common info
- * @top_dentry: A reference to the top debugfs dentry
- * @name: Name of this SCMI instance
- * @type: Type of this SCMI instance
- * @is_atomic: Flag to state if the transport of this instance is atomic
- * @counters: An array of atomic_c's used for tracking statistics (if enabled)
- */
-struct scmi_debug_info {
- struct dentry *top_dentry;
- const char *name;
- const char *type;
- bool is_atomic;
- atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
-};
-
-/**
* struct scmi_info - Structure representing a SCMI instance
*
* @id: A sequence number starting from zero identifying this instance
@@ -610,7 +594,7 @@ scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
/* Set in-flight */
set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
- scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT);
+ scmi_inc_count(info->dbg, XFERS_INFLIGHT);
xfer->pending = true;
}
@@ -819,8 +803,9 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
hash_del(&xfer->node);
xfer->pending = false;
- scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT);
+ scmi_dec_count(info->dbg, XFERS_INFLIGHT);
}
+ xfer->flags = 0;
hlist_add_head(&xfer->node, &minfo->free_xfers);
}
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
@@ -839,8 +824,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(handle);
- xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
- xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
return __scmi_xfer_put(&info->tx_minfo, xfer);
}
@@ -1034,7 +1017,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
- scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
+ scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED);
return xfer;
}
@@ -1062,7 +1045,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
msg_type, xfer_id, msg_hdr, xfer->state);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
- scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
+ scmi_inc_count(info->dbg, ERR_MSG_INVALID);
/* On error the refcount incremented above has to be dropped */
__scmi_xfer_put(minfo, xfer);
@@ -1107,7 +1090,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
PTR_ERR(xfer));
scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
- scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
+ scmi_inc_count(info->dbg, ERR_MSG_NOMEM);
scmi_clear_channel(info, cinfo);
return;
@@ -1123,7 +1106,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "NOTI", xfer->hdr.seq,
xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
- scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
+ scmi_inc_count(info->dbg, NOTIFICATION_OK);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -1183,10 +1166,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
scmi_clear_channel(info, cinfo);
complete(xfer->async_done);
- scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
+ scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK);
} else {
complete(&xfer->done);
- scmi_inc_count(info->dbg->counters, RESPONSE_OK);
+ scmi_inc_count(info->dbg, RESPONSE_OK);
}
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
@@ -1296,7 +1279,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
+ scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT);
}
}
@@ -1321,7 +1304,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
- scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
+ scmi_inc_count(info->dbg, RESPONSE_POLLED_OK);
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
scmi_raw_message_report(info->raw, xfer,
@@ -1336,7 +1319,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
+ scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT);
}
}
@@ -1420,13 +1403,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
!is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
- scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
+ scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED);
return -EINVAL;
}
cinfo = idr_find(&info->tx_idr, pi->proto->id);
if (unlikely(!cinfo)) {
- scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
+ scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND);
return -EINVAL;
}
/* True ONLY if also supported by transport. */
@@ -1461,19 +1444,19 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
dev_dbg(dev, "Failed to send message %d\n", ret);
- scmi_inc_count(info->dbg->counters, SENT_FAIL);
+ scmi_inc_count(info->dbg, SENT_FAIL);
return ret;
}
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "CMND", xfer->hdr.seq,
xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
- scmi_inc_count(info->dbg->counters, SENT_OK);
+ scmi_inc_count(info->dbg, SENT_OK);
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
- scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
+ scmi_inc_count(info->dbg, ERR_PROTOCOL);
}
if (info->desc->ops->mark_txdone)
@@ -3044,9 +3027,6 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
u8 channels[SCMI_MAX_CHANNELS] = {};
DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
- if (!info->dbg)
- return -EINVAL;
-
/* Enumerate all channels to collect their ids */
idr_for_each_entry(&info->tx_idr, cinfo, id) {
/*
@@ -3218,7 +3198,7 @@ static int scmi_probe(struct platform_device *pdev)
if (!info->dbg)
dev_warn(dev, "Failed to setup SCMI debugfs.\n");
- if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
ret = scmi_debugfs_raw_mode_setup(info);
if (!coex) {
if (ret)
@@ -3423,6 +3403,9 @@ int scmi_inflight_count(const struct scmi_handle *handle)
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
struct scmi_info *info = handle_to_scmi_info(handle);
+ if (!info->dbg)
+ return 0;
+
return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
} else {
return 0;
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index ffe7e1cb6b23..fe5c10cd5c32 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -59,6 +59,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
+ .max_register = 0x5,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,
diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c
index 0103be977c66..4fbae6f6a497 100644
--- a/drivers/gpio/gpio-idio-16.c
+++ b/drivers/gpio/gpio-idio-16.c
@@ -6,6 +6,7 @@
#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
+#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -107,6 +108,7 @@ int devm_idio_16_regmap_register(struct device *const dev,
struct idio_16_data *data;
struct regmap_irq_chip *chip;
struct regmap_irq_chip_data *chip_data;
+ DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO);
if (!config->parent)
return -EINVAL;
@@ -164,6 +166,9 @@ int devm_idio_16_regmap_register(struct device *const dev,
gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate;
+ bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0));
+ gpio_config.fixed_direction_output = fixed_direction_output;
+
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register);
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 3b4f8830c741..f32d1d237795 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -286,22 +286,14 @@ static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data,
{
const struct ljca_gpio_packet *packet = evt_data;
struct ljca_gpio_dev *ljca_gpio = context;
- int i, irq;
+ int i;
if (cmd != LJCA_GPIO_INT_EVENT)
return;
for (i = 0; i < packet->num; i++) {
- irq = irq_find_mapping(ljca_gpio->gc.irq.domain,
- packet->item[i].index);
- if (!irq) {
- dev_err(ljca_gpio->gc.parent,
- "gpio_id %u does not mapped to IRQ yet\n",
- packet->item[i].index);
- return;
- }
-
- generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq);
+ generic_handle_domain_irq(ljca_gpio->gc.irq.domain,
+ packet->item[i].index);
set_bit(packet->item[i].index, ljca_gpio->reenable_irqs);
}
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 476cea1b5ed7..9d28ca8e1d6f 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -41,6 +41,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
+ .max_register = 0x7,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index ab9e4077fa60..f4267af00027 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -31,6 +31,7 @@ struct gpio_regmap {
unsigned int reg_clr_base;
unsigned int reg_dir_in_base;
unsigned int reg_dir_out_base;
+ unsigned long *fixed_direction_output;
#ifdef CONFIG_REGMAP_IRQ
int regmap_irq_line;
@@ -134,6 +135,13 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip,
unsigned int base, val, reg, mask;
int invert, ret;
+ if (gpio->fixed_direction_output) {
+ if (test_bit(offset, gpio->fixed_direction_output))
+ return GPIO_LINE_DIRECTION_OUT;
+ else
+ return GPIO_LINE_DIRECTION_IN;
+ }
+
if (gpio->reg_dat_base && !gpio->reg_set_base)
return GPIO_LINE_DIRECTION_IN;
if (gpio->reg_set_base && !gpio->reg_dat_base)
@@ -284,6 +292,17 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
goto err_free_gpio;
}
+ if (config->fixed_direction_output) {
+ gpio->fixed_direction_output = bitmap_alloc(chip->ngpio,
+ GFP_KERNEL);
+ if (!gpio->fixed_direction_output) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+ bitmap_copy(gpio->fixed_direction_output,
+ config->fixed_direction_output, chip->ngpio);
+ }
+
/* if not set, assume there is only one register */
gpio->ngpio_per_reg = config->ngpio_per_reg;
if (!gpio->ngpio_per_reg)
@@ -300,7 +319,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
ret = gpiochip_add_data(chip, gpio);
if (ret < 0)
- goto err_free_gpio;
+ goto err_free_bitmap;
#ifdef CONFIG_REGMAP_IRQ
if (config->regmap_irq_chip) {
@@ -309,7 +328,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
config->regmap_irq_line, config->regmap_irq_flags,
0, config->regmap_irq_chip, &gpio->irq_chip_data);
if (ret)
- goto err_free_gpio;
+ goto err_free_bitmap;
irq_domain = regmap_irq_get_domain(gpio->irq_chip_data);
} else
@@ -326,6 +345,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
err_remove_gpiochip:
gpiochip_remove(chip);
+err_free_bitmap:
+ bitmap_free(gpio->fixed_direction_output);
err_free_gpio:
kfree(gpio);
return ERR_PTR(ret);
@@ -344,6 +365,7 @@ void gpio_regmap_unregister(struct gpio_regmap *gpio)
#endif
gpiochip_remove(&gpio->gpio_chip);
+ bitmap_free(gpio->fixed_direction_output);
kfree(gpio);
}
EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
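The new fixed_direction_output bitmap lets a gpio-regmap user declare lines whose direction is hard-wired, so gpio_regmap_get_direction() can answer without consulting a direction register. A minimal consumer sketch for a chip with 16 output-only lines (hypothetical driver, register offsets assumed):

    /* Sketch only: marks GPIOs 0..15 as fixed outputs. */
    static int foo_gpio_register(struct device *dev, struct regmap *map)
    {
    	DECLARE_BITMAP(fixed_out, 16);
    	struct gpio_regmap_config cfg = {
    		.parent = dev,
    		.regmap = map,
    		.ngpio = 16,
    		.reg_set_base = 0x2,	/* assumed register offsets */
    		.reg_dat_base = 0x6,
    	};

    	bitmap_from_u64(fixed_out, GENMASK_U64(15, 0));
    	cfg.fixed_direction_output = fixed_out;

    	/* gpio_regmap_register() copies the bitmap, so a stack bitmap is fine. */
    	return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &cfg));
    }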
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
index 284e762d92c4..d441c1236d8c 100644
--- a/drivers/gpio/gpiolib-acpi-core.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -291,6 +291,19 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
return GPIOD_ASIS;
}
+static void acpi_gpio_set_debounce_timeout(struct gpio_desc *desc,
+ unsigned int acpi_debounce)
+{
+ int ret;
+
+ /* ACPI uses hundredths of milliseconds units */
+ acpi_debounce *= 10;
+ ret = gpio_set_debounce_timeout(desc, acpi_debounce);
+ if (ret)
+ gpiod_warn(desc, "Failed to set debounce-timeout %u: %d\n",
+ acpi_debounce, ret);
+}
+
static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
struct acpi_resource_gpio *agpio,
unsigned int index,
@@ -300,18 +313,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
unsigned int pin = agpio->pin_table[index];
struct gpio_desc *desc;
- int ret;
desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
if (IS_ERR(desc))
return desc;
- /* ACPI uses hundredths of milliseconds units */
- ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
- if (ret)
- dev_warn(chip->parent,
- "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
- pin, ret);
+ acpi_gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
return desc;
}
@@ -375,8 +382,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
if (IS_ERR(desc)) {
dev_err(chip->parent,
- "Failed to request GPIO for pin 0x%04X, err %ld\n",
- pin, PTR_ERR(desc));
+ "Failed to request GPIO for pin 0x%04X, err %pe\n",
+ pin, desc);
return AE_OK;
}
@@ -944,7 +951,6 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
struct acpi_gpio_info info = {};
struct gpio_desc *desc;
- int ret;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
if (IS_ERR(desc))
@@ -959,10 +965,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
- /* ACPI uses hundredths of milliseconds units */
- ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
- if (ret)
- return ERR_PTR(ret);
+ acpi_gpio_set_debounce_timeout(desc, info.debounce);
return desc;
}
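For context on the *10 in acpi_gpio_set_debounce_timeout(): ACPI expresses DebounceTimeout in hundredths of a millisecond (10 us units), while gpio_set_debounce_timeout() expects microseconds, so the helper scales by ten before handing the value on. A tiny sketch of the conversion with an assumed example value:

    /* Sketch: an ACPI DebounceTimeout of 25 (hundredths of ms)
     * becomes 250 us before the gpiolib call.
     */
    unsigned int acpi_debounce = 25;		/* example _CRS value */
    unsigned int debounce_us = acpi_debounce * 10;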
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 6597475e245d..bfa3199591b6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
struct dc_stream_state *stream,
struct dc_crtc_timing_adjust *adjust)
{
- struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
if (!offload_work) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
return;
}
- struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
if (!adjust_copy) {
drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
kfree(offload_work);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 7c276c319086..ce3d0b45fb4c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -200,6 +200,9 @@ void dcn401_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 41c76ba9ba56..62a39204fe0b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,7 +44,13 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
-#define MAX_LINKS (MAX_PIPES * 2 +2)
+
+#define MAX_DPIA 6
+#define MAX_CONNECTOR 6
+#define MAX_VIRTUAL_LINKS 4
+
+#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
+
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 9e33bf937a69..2676ae9f6fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
struct audio_output audio_output[MAX_PIPES];
struct dc_stream_state *streams_on_link[MAX_PIPES];
int num_streams_on_link = 0;
+ struct dc *dc = (struct dc *)link->dc;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
- dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
+ dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
}
}
}
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 1d6312fa1429..d4b6ea42db0f 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -174,6 +174,33 @@ static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
*p = color & 0xff;
}
+/*
+ * Special case if the pixel crosses page boundaries
+ */
+static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page,
+ unsigned int offset, u32 color)
+{
+ u8 *vaddr2;
+ u8 *p = vaddr + offset;
+
+ vaddr2 = kmap_local_page_try_from_panic(next_page);
+
+ *p++ = color & 0xff;
+ color >>= 8;
+
+ if (offset == PAGE_SIZE - 1)
+ p = vaddr2;
+
+ *p++ = color & 0xff;
+ color >>= 8;
+
+ if (offset == PAGE_SIZE - 2)
+ p = vaddr2;
+
+ *p = color & 0xff;
+ kunmap_local(vaddr2);
+}
+
static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
{
u32 *p = vaddr + offset;
@@ -231,7 +258,14 @@ static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
- if (vaddr)
+ if (!vaddr)
+ continue;
+
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, fg32);
+ else
drm_panic_write_pixel(vaddr, offset, fg32, cpp);
}
}
@@ -321,7 +355,15 @@ static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
- drm_panic_write_pixel(vaddr, offset, color, cpp);
+ if (!vaddr)
+ continue;
+
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, color);
+ else
+ drm_panic_write_pixel(vaddr, offset, color, cpp);
}
}
if (vaddr)
@@ -429,6 +471,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f
static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,
const struct font_desc *font, u32 fg_color)
{
+ if (rect->x2 > sb->width || rect->y2 > sb->height)
+ return;
+
if (logo_mono)
drm_panic_blit(sb, rect, logo_mono->data,
DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color);
@@ -477,7 +522,7 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
struct drm_panic_line *line, int yoffset, u32 fg_color)
{
int chars_per_row = sb->width / font->width;
- struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height);
+ struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height);
struct drm_panic_line line_wrap;
if (line->len > chars_per_row) {
@@ -520,7 +565,7 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
struct drm_panic_line line;
int yoffset;
- if (!font)
+ if (!font || font->width > sb->width)
return;
yoffset = sb->height - font->height - (sb->height % font->height) / 2;
@@ -733,7 +778,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
pr_debug("QR width %d and scale %d\n", qr_width, scale);
r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale);
- v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5;
+ v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg);
+ if (v_margin < 0)
+ return -ENOSPC;
+ v_margin /= 5;
drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);
r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale,
@@ -746,7 +794,7 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
/* Fill with the background color, and draw text on top */
drm_panic_fill(sb, &r_screen, bg_color);
- if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr))
+ if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas))
drm_panic_logo_draw(sb, &r_logo, font, fg_color);
draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
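The 24-bit special case exists because PAGE_SIZE is not a multiple of 3: a pixel starting at offset PAGE_SIZE - 1 or PAGE_SIZE - 2 spills into the following page, which is why drm_panic_write_pixel24_xpage() maps pages[page + 1] and switches the destination pointer at exactly that byte. A minimal sketch of the boundary test (hypothetical helper, same condition as the hunks above):

    /* True for offsets PAGE_SIZE - 2 and PAGE_SIZE - 1 when cpp == 3. */
    static bool pixel24_crosses_page(unsigned int offset)
    {
    	return offset + 3 > PAGE_SIZE;
    }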
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index b817ff44c043..c48384e58ea1 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -2117,6 +2117,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
+ kfree(intel_fb->panic);
kfree(intel_fb);
}
@@ -2215,16 +2216,22 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct intel_display *display = to_intel_display(obj->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
- int ret = -EINVAL;
+ int ret;
int i;
+ intel_fb->panic = intel_panic_alloc();
+ if (!intel_fb->panic)
+ return -ENOMEM;
+
/*
* intel_frontbuffer_get() must be done before
* intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
*/
intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
+ if (!intel_fb->frontbuffer) {
+ ret = -ENOMEM;
+ goto err_free_panic;
+ }
ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
if (ret)
@@ -2323,6 +2330,9 @@ err_bo_framebuffer_fini:
intel_fb_bo_framebuffer_fini(obj);
err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
+err_free_panic:
+ kfree(intel_fb->panic);
+
return ret;
}
@@ -2349,20 +2359,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct intel_framebuffer *intel_framebuffer_alloc(void)
{
struct intel_framebuffer *intel_fb;
- struct intel_panic *panic;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb)
return NULL;
- panic = intel_panic_alloc();
- if (!panic) {
- kfree(intel_fb);
- return NULL;
- }
-
- intel_fb->panic = panic;
-
return intel_fb;
}
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 6dec4354e378..7870e7dbaa5d 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -1175,10 +1175,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
break;
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
- /* Partial unmaps might trigger a remap with either a prev or a next VA,
- * but not both.
+ /* Two VMAs can be needed for an unmap, as an unmap can happen
+ * in the middle of a drm_gpuva, requiring a remap with both
+ * prev & next VA. Or an unmap can span more than one drm_gpuva
+ * where the first and last ones are covered partially, requiring
+ * a remap for the first with a prev VA and remap for the last
+ * with a next VA.
*/
- vma_count = 1;
+ vma_count = 2;
break;
default:
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 7b613997bb50..727cdf768161 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -361,7 +361,7 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2,
FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) |
- FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1));
+ FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1));
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 7fdd0a97a628..5edc0cad47e2 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -292,6 +292,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
+ if (!ggtt->wq)
+ return -ENOMEM;
+
__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index a1c88f9a6c76..07f96bda638a 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -2022,7 +2022,7 @@ static int op_prepare(struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
!op->map.invalidate_on_bind) ||
- op->map.is_cpu_addr_mirror)
+ (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
@@ -2252,7 +2252,7 @@ static void op_commit(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
- op->map.is_cpu_addr_mirror)
+ (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index da2a412f80c0..129e7818565c 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -302,6 +302,11 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64
if (!vma)
return -EINVAL;
+ if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
+ drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
+ return 0;
+ }
+
if (xe_vma_has_default_mem_attrs(vma))
return 0;
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index f602b874e054..63c65e3d207b 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -616,6 +616,13 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
vops->pt_update_ops[i].num_ops += inc_val;
}
+#define XE_VMA_CREATE_MASK ( \
+ XE_VMA_READ_ONLY | \
+ XE_VMA_DUMPABLE | \
+ XE_VMA_SYSTEM_ALLOCATOR | \
+ DRM_GPUVA_SPARSE | \
+ XE_VMA_MADV_AUTORESET)
+
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{
@@ -628,8 +635,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
op->base.map.gem.offset = vma->gpuva.gem.offset;
op->map.vma = vma;
op->map.immediate = true;
- op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
- op->map.is_null = xe_vma_is_null(vma);
+ op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
}
static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
@@ -932,11 +938,6 @@ static void xe_vma_free(struct xe_vma *vma)
kfree(vma);
}
-#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
-#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3)
-
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
@@ -947,11 +948,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_vma *vma;
struct xe_tile *tile;
u8 id;
- bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
- bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
- bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
- bool is_cpu_addr_mirror =
- (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
+ bool is_null = (flags & DRM_GPUVA_SPARSE);
+ bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -972,10 +970,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (!vma)
return ERR_PTR(-ENOMEM);
- if (is_cpu_addr_mirror)
- vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
- if (is_null)
- vma->gpuva.flags |= DRM_GPUVA_SPARSE;
if (bo)
vma->gpuva.gem.obj = &bo->ttm.base;
}
@@ -986,10 +980,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.vm = &vm->gpuvm;
vma->gpuva.va.addr = start;
vma->gpuva.va.range = end - start + 1;
- if (read_only)
- vma->gpuva.flags |= XE_VMA_READ_ONLY;
- if (dumpable)
- vma->gpuva.flags |= XE_VMA_DUMPABLE;
+ vma->gpuva.flags = flags;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -2272,12 +2263,16 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
if (__op->op == DRM_GPUVA_OP_MAP) {
op->map.immediate =
flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
- op->map.read_only =
- flags & DRM_XE_VM_BIND_FLAG_READONLY;
- op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
- op->map.is_cpu_addr_mirror = flags &
- DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
- op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
+ if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
+ op->map.vma_flags |= XE_VMA_READ_ONLY;
+ if (flags & DRM_XE_VM_BIND_FLAG_NULL)
+ op->map.vma_flags |= DRM_GPUVA_SPARSE;
+ if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+ op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
+ if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
+ op->map.vma_flags |= XE_VMA_DUMPABLE;
+ if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
+ op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
op->map.pat_index = pat_index;
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
@@ -2590,14 +2585,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
.pat_index = op->map.pat_index,
};
- flags |= op->map.read_only ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->map.is_null ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->map.dumpable ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
- flags |= op->map.is_cpu_addr_mirror ?
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+ flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
vma = new_vma(vm, &op->base.map, &default_attr,
flags);
@@ -2606,7 +2594,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
op->map.vma = vma;
if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
- !op->map.is_cpu_addr_mirror) ||
+ !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask, 1);
@@ -2637,18 +2625,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
- flags |= xe_vma_is_cpu_addr_mirror(old) ?
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
-
+ flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
&old->attr, flags);
@@ -3279,7 +3256,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE | \
DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
- DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
+ DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
#ifdef TEST_VM_OPS_ERROR
#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -3394,7 +3372,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
!(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
XE_IOCTL_DBG(xe, obj &&
- op == DRM_XE_VM_BIND_OP_UNMAP)) {
+ op == DRM_XE_VM_BIND_OP_UNMAP) ||
+ XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
+ (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -4212,7 +4192,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
struct xe_vma_ops vops;
struct drm_gpuva_ops *ops = NULL;
struct drm_gpuva_op *__op;
- bool is_cpu_addr_mirror = false;
+ unsigned int vma_flags = 0;
bool remap_op = false;
struct xe_vma_mem_attr tmp_attr;
u16 default_pat;
@@ -4242,15 +4222,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
vma = gpuva_to_vma(op->base.unmap.va);
XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
default_pat = vma->attr.default_pat_index;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_REMAP) {
vma = gpuva_to_vma(op->base.remap.unmap->va);
default_pat = vma->attr.default_pat_index;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_MAP) {
- op->map.is_cpu_addr_mirror = true;
+ op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
op->map.pat_index = default_pat;
}
} else {
@@ -4259,11 +4241,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
xe_assert(vm->xe, !remap_op);
xe_assert(vm->xe, xe_vma_has_no_bo(vma));
remap_op = true;
-
- if (xe_vma_is_cpu_addr_mirror(vma))
- is_cpu_addr_mirror = true;
- else
- is_cpu_addr_mirror = false;
+ vma_flags = vma->gpuva.flags;
}
if (__op->op == DRM_GPUVA_OP_MAP) {
@@ -4272,10 +4250,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm,
/*
* In case of madvise ops DRM_GPUVA_OP_MAP is
* always after DRM_GPUVA_OP_REMAP, so ensure
- * we assign op->map.is_cpu_addr_mirror true
- * if REMAP is for xe_vma_is_cpu_addr_mirror vma
+ * we propagate the flags from the vma we're
+ * unmapping.
*/
- op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+ op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
}
}
print_op(vm->xe, __op);
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 413353e1c225..d6e2a0fdd4b3 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -46,6 +46,7 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
+#define XE_VMA_MADV_AUTORESET (DRM_GPUVA_USERBITS << 10)
/**
* struct xe_vma_mem_attr - memory attributes associated with vma
@@ -345,17 +346,10 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
+ unsigned int vma_flags;
/** @immediate: Immediate bind */
bool immediate;
/** @read_only: Read only */
- bool read_only;
- /** @is_null: is NULL binding */
- bool is_null;
- /** @is_cpu_addr_mirror: is CPU address mirror binding */
- bool is_cpu_addr_mirror;
- /** @dumpable: whether BO is dumped on GPU hang */
- bool dumpable;
- /** @invalidate: invalidate the VMA before bind */
bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
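The xe changes above fold the per-op booleans (read_only, is_null, dumpable, is_cpu_addr_mirror) into a single vma_flags word built from the XE_VMA_*/DRM_GPUVA_SPARSE bits and filtered through XE_VMA_CREATE_MASK. A sketch of the uapi-flag translation, mirroring vm_bind_ioctl_ops_create() in the hunks above (illustration, not a drop-in replacement):

    static unsigned int xe_bind_flags_to_vma_flags(u32 bind_flags)
    {
    	unsigned int vma_flags = 0;

    	if (bind_flags & DRM_XE_VM_BIND_FLAG_READONLY)
    		vma_flags |= XE_VMA_READ_ONLY;
    	if (bind_flags & DRM_XE_VM_BIND_FLAG_NULL)
    		vma_flags |= DRM_GPUVA_SPARSE;
    	if (bind_flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
    		vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
    	if (bind_flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
    		vma_flags |= XE_VMA_DUMPABLE;
    	if (bind_flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
    		vma_flags |= XE_VMA_MADV_AUTORESET;

    	/* only these bits may be carried into a newly created VMA */
    	return vma_flags & XE_VMA_CREATE_MASK;
    }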
diff --git a/drivers/hwmon/cgbc-hwmon.c b/drivers/hwmon/cgbc-hwmon.c
index 772f44d56ccf..3aff4e092132 100644
--- a/drivers/hwmon/cgbc-hwmon.c
+++ b/drivers/hwmon/cgbc-hwmon.c
@@ -107,6 +107,9 @@ static int cgbc_hwmon_probe_sensors(struct device *dev, struct cgbc_hwmon_data *
nb_sensors = data[0];
hwmon->sensors = devm_kzalloc(dev, sizeof(*hwmon->sensors) * nb_sensors, GFP_KERNEL);
+ if (!hwmon->sensors)
+ return -ENOMEM;
+
sensor = hwmon->sensors;
for (i = 0; i < nb_sensors; i++) {
diff --git a/drivers/hwmon/gpd-fan.c b/drivers/hwmon/gpd-fan.c
index 644dc3ca9df7..321794807e8d 100644
--- a/drivers/hwmon/gpd-fan.c
+++ b/drivers/hwmon/gpd-fan.c
@@ -615,14 +615,14 @@ static int gpd_fan_probe(struct platform_device *pdev)
const struct device *hwdev;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (IS_ERR(res))
- return dev_err_probe(dev, PTR_ERR(res),
+ if (!res)
+ return dev_err_probe(dev, -EINVAL,
"Failed to get platform resource\n");
region = devm_request_region(dev, res->start,
resource_size(res), DRIVER_NAME);
- if (IS_ERR(region))
- return dev_err_probe(dev, PTR_ERR(region),
+ if (!region)
+ return dev_err_probe(dev, -EBUSY,
"Failed to request region\n");
hwdev = devm_hwmon_device_register_with_info(dev,
@@ -631,7 +631,7 @@ static int gpd_fan_probe(struct platform_device *pdev)
&gpd_fan_chip_info,
NULL);
if (IS_ERR(hwdev))
- return dev_err_probe(dev, PTR_ERR(region),
+ return dev_err_probe(dev, PTR_ERR(hwdev),
"Failed to register hwmon device\n");
return 0;
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 52cf62e45a86..6bba9b50c51b 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -336,10 +336,9 @@ static int isl68137_probe_from_dt(struct device *dev,
struct isl68137_data *data)
{
const struct device_node *np = dev->of_node;
- struct device_node *child;
int err;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (strcmp(child->name, "channel"))
continue;
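for_each_child_of_node_scoped() declares the child variable itself and drops the OF node reference automatically when it goes out of scope, so early returns inside the loop cannot leak a reference. A minimal usage sketch (parsing step elided):

    for_each_child_of_node_scoped(np, child) {
    	if (strcmp(child->name, "channel"))
    		continue;
    	/* parse the channel node; returning early here is safe,
    	 * no of_node_put() is needed on any exit path */
    }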
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 56834d26f8ef..ef981ed97da8 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -336,18 +336,18 @@ static struct pmbus_driver_info max34440_info[] = {
.format[PSC_CURRENT_IN] = direct,
.format[PSC_CURRENT_OUT] = direct,
.format[PSC_TEMPERATURE] = direct,
- .m[PSC_VOLTAGE_IN] = 1,
+ .m[PSC_VOLTAGE_IN] = 125,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = 0,
- .m[PSC_VOLTAGE_OUT] = 1,
+ .m[PSC_VOLTAGE_OUT] = 125,
.b[PSC_VOLTAGE_OUT] = 0,
.R[PSC_VOLTAGE_OUT] = 0,
- .m[PSC_CURRENT_IN] = 1,
+ .m[PSC_CURRENT_IN] = 250,
.b[PSC_CURRENT_IN] = 0,
- .R[PSC_CURRENT_IN] = 2,
- .m[PSC_CURRENT_OUT] = 1,
+ .R[PSC_CURRENT_IN] = -1,
+ .m[PSC_CURRENT_OUT] = 250,
.b[PSC_CURRENT_OUT] = 0,
- .R[PSC_CURRENT_OUT] = 2,
+ .R[PSC_CURRENT_OUT] = -1,
.m[PSC_TEMPERATURE] = 1,
.b[PSC_TEMPERATURE] = 0,
.R[PSC_TEMPERATURE] = 2,
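For reference when reading the coefficient change above: PMBus DIRECT-format readings are encoded as Y = (m * X + b) * 10^R, so the real value is recovered as X = (Y * 10^-R - b) / m. A small decode sketch (illustration only, not the pmbus core implementation):

    /* Example with the new current coefficients m = 250, b = 0, R = -1:
     * Y = 250  ->  X = (250 * 10 - 0) / 250 = 10 (in the driver's scaled units).
     */
    static long direct_decode(long y, long m, long b, int R)
    {
    	long val = y;

    	for (; R < 0; R++)
    		val *= 10;
    	for (; R > 0; R--)
    		val /= 10;
    	return (val - b) / m;
    }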
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 557ad3e7752a..f36c0229328f 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -291,24 +291,26 @@ out:
return data;
}
-static int temp1_input_read(struct device *dev)
+static int temp1_input_read(struct device *dev, long *temp)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
- return data->temperature;
+ *temp = data->temperature;
+ return 0;
}
-static int humidity1_input_read(struct device *dev)
+static int humidity1_input_read(struct device *dev, long *humidity)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
- return data->humidity;
+ *humidity = data->humidity;
+ return 0;
}
/*
@@ -706,6 +708,7 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
enum sht3x_limits index;
+ int ret;
switch (type) {
case hwmon_chip:
@@ -720,10 +723,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
- *val = temp1_input_read(dev);
- break;
+ return temp1_input_read(dev, val);
case hwmon_temp_alarm:
- *val = temp1_alarm_read(dev);
+ ret = temp1_alarm_read(dev);
+ if (ret < 0)
+ return ret;
+ *val = ret;
break;
case hwmon_temp_max:
index = limit_max;
@@ -748,10 +753,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_humidity:
switch (attr) {
case hwmon_humidity_input:
- *val = humidity1_input_read(dev);
- break;
+ return humidity1_input_read(dev, val);
case hwmon_humidity_alarm:
- *val = humidity1_alarm_read(dev);
+ ret = humidity1_alarm_read(dev);
+ if (ret < 0)
+ return ret;
+ *val = ret;
break;
case hwmon_humidity_max:
index = limit_max;
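The sht3x changes follow the hwmon ->read() convention: the return value is reserved for 0 or a negative errno, and the measurement is delivered through *val, so a negative temperature can no longer be confused with an error code (or vice versa). A minimal sketch of that convention, using a hypothetical helper:

    static int foo_read(struct device *dev, enum hwmon_sensor_types type,
    		    u32 attr, int channel, long *val)
    {
    	int raw = foo_get_raw(dev);	/* hypothetical helper */

    	if (raw < 0)
    		return raw;		/* -errno, not a measurement */
    	*val = raw;
    	return 0;
    }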
diff --git a/drivers/misc/amd-sbi/Kconfig b/drivers/misc/amd-sbi/Kconfig
index 4aae0733d0fc..ab594908cb4a 100644
--- a/drivers/misc/amd-sbi/Kconfig
+++ b/drivers/misc/amd-sbi/Kconfig
@@ -2,9 +2,11 @@
config AMD_SBRMI_I2C
tristate "AMD side band RMI support"
depends on I2C
+ depends on ARM || ARM64 || COMPILE_TEST
select REGMAP_I2C
help
Side band RMI over I2C support for AMD out of band management.
+ This driver is intended to run on the BMC, not the managed node.
This driver can also be built as a module. If so, the module will
be called sbrmi-i2c.
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 621bce7e101c..ee652ef01534 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -381,6 +381,8 @@ static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
}
spin_unlock(&fl->lock);
+ dma_buf_put(buf);
+
return ret;
}
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index bc40b940ae21..a4f75dc36929 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -120,6 +120,8 @@
#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */
#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
+#define MEI_DEV_ID_WCL_P 0x4D70 /* Wildcat Lake P */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/mei_lb.c b/drivers/misc/mei/mei_lb.c
index 77686b108d3c..78717ee8ac9a 100644
--- a/drivers/misc/mei/mei_lb.c
+++ b/drivers/misc/mei/mei_lb.c
@@ -134,8 +134,7 @@ static bool mei_lb_check_response(const struct device *dev, ssize_t bytes,
return true;
}
-static int mei_lb_push_payload(struct device *dev,
- enum intel_lb_type type, u32 flags,
+static int mei_lb_push_payload(struct device *dev, u32 type, u32 flags,
const void *payload, size_t payload_size)
{
struct mei_cl_device *cldev;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index b108a7c22388..b017ff29dbd1 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -127,6 +127,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index c9eb5c5393e4..06b55a891c6b 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -109,19 +109,19 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto end;
}
+ err = mei_register(dev, &pdev->dev);
+ if (err)
+ goto release_irq;
+
if (mei_start(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
- goto release_irq;
+ goto deregister;
}
pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- err = mei_register(dev, &pdev->dev);
- if (err)
- goto stop;
-
pci_set_drvdata(pdev, dev);
/*
@@ -144,8 +144,8 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-stop:
- mei_stop(dev);
+deregister:
+ mei_deregister(dev);
release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6df51ee8db62..cc1d18b3df5c 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1737,7 +1737,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
{
unsigned long status, flags;
struct vmballoon *b;
- int ret;
+ int ret = 0;
b = container_of(b_dev_info, struct vmballoon, b_dev_info);
@@ -1796,17 +1796,15 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* A failure happened. While we can deflate the page we just
* inflated, this deflation can also encounter an error. Instead
* we will decrease the size of the balloon to reflect the
- * change and report failure.
+ * change.
*/
atomic64_dec(&b->size);
- ret = -EBUSY;
} else {
/*
* Success. Take a reference for the page, and we will add it to
* the list after acquiring the lock.
*/
get_page(newpage);
- ret = 0;
}
/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1815,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* If we succeed just insert it to the list and update the statistics
* under the lock.
*/
- if (!ret) {
+ if (status == VMW_BALLOON_SUCCESS) {
balloon_page_insert(&b->b_dev_info, newpage);
__count_vm_event(BALLOON_MIGRATE);
}
diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c
index cf5be9c449a5..10064d7b7249 100644
--- a/drivers/most/most_usb.c
+++ b/drivers/most/most_usb.c
@@ -929,6 +929,10 @@ static void release_mdev(struct device *dev)
{
struct most_dev *mdev = to_mdev_from_dev(dev);
+ kfree(mdev->busy_urbs);
+ kfree(mdev->cap);
+ kfree(mdev->conf);
+ kfree(mdev->ep_address);
kfree(mdev);
}
/**
@@ -1093,7 +1097,7 @@ err_free_cap:
err_free_conf:
kfree(mdev->conf);
err_free_mdev:
- put_device(&mdev->dev);
+ kfree(mdev);
return ret;
}
@@ -1121,13 +1125,6 @@ static void hdm_disconnect(struct usb_interface *interface)
if (mdev->dci)
device_unregister(&mdev->dci->dev);
most_deregister_interface(&mdev->iface);
-
- kfree(mdev->busy_urbs);
- kfree(mdev->cap);
- kfree(mdev->conf);
- kfree(mdev->ep_address);
- put_device(&mdev->dci->dev);
- put_device(&mdev->dev);
}
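The most_usb hunks move the kfree() calls into release_mdev(): once the struct device is in use, its memory and dependent allocations are owned by the ->release() callback and freed when the last reference is dropped, rather than in disconnect(). A general sketch of that ownership pattern (hypothetical struct foo):

    static void foo_release(struct device *dev)
    {
    	struct foo *f = container_of(dev, struct foo, dev);

    	kfree(f->buf);		/* dependent allocations */
    	kfree(f);
    }
    /* On an error path taken before the device is initialized/registered,
     * a plain kfree(f) is still the right call, as in the probe hunk above.
     */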
static int hdm_suspend(struct usb_interface *interface, pm_message_t message)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4da619210c1f..e95e593cd12d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2287,7 +2287,9 @@ skip_mac_set:
unblock_netpoll_tx();
}
- if (bond_mode_can_use_xmit_hash(bond))
+ /* broadcast mode uses the all_slaves array to loop through slaves. */
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, NULL);
if (!slave_dev->netdev_ops->ndo_bpf ||
@@ -2463,7 +2465,8 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_upper_dev_unlink(bond, slave);
- if (bond_mode_can_use_xmit_hash(bond))
+ if (bond_mode_can_use_xmit_hash(bond) ||
+ BOND_MODE(bond) == BOND_MODE_BROADCAST)
bond_update_slave_arr(bond, slave);
slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
@@ -2871,7 +2874,7 @@ static void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
- bool should_notify_peers = false;
+ bool should_notify_peers;
bool commit;
unsigned long delay;
struct slave *slave;
@@ -2883,30 +2886,33 @@ static void bond_mii_monitor(struct work_struct *work)
goto re_arm;
rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
commit = !!bond_miimon_inspect(bond);
- if (bond->send_peer_notif) {
- rcu_read_unlock();
- if (rtnl_trylock()) {
- bond->send_peer_notif--;
- rtnl_unlock();
- }
- } else {
- rcu_read_unlock();
- }
- if (commit) {
+ rcu_read_unlock();
+
+ if (commit || bond->send_peer_notif) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
- should_notify_peers = false;
goto re_arm;
}
- bond_for_each_slave(bond, slave, iter) {
- bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
+ if (commit) {
+ bond_for_each_slave(bond, slave, iter) {
+ bond_commit_link_state(slave,
+ BOND_SLAVE_NOTIFY_LATER);
+ }
+ bond_miimon_commit(bond);
+ }
+
+ if (bond->send_peer_notif) {
+ bond->send_peer_notif--;
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
}
- bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
}
@@ -2914,13 +2920,6 @@ static void bond_mii_monitor(struct work_struct *work)
re_arm:
if (bond->params.miimon)
queue_delayed_work(bond->wq, &bond->mii_work, delay);
-
- if (should_notify_peers) {
- if (!rtnl_trylock())
- return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
- rtnl_unlock();
- }
}
static int bond_upper_dev_walk(struct net_device *upper,
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
index bfc60eb33dc3..333ad42ea73b 100644
--- a/drivers/net/can/bxcan.c
+++ b/drivers/net/can/bxcan.c
@@ -842,7 +842,7 @@ static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb,
u32 id;
int i, j;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (bxcan_tx_busy(priv))
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 0591406b6f32..6f83b87d54fc 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -452,7 +452,9 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_CAN_RESTART_MS]) {
- if (!priv->do_set_mode) {
+ unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+
+ if (restart_ms != 0 && !priv->do_set_mode) {
NL_SET_ERR_MSG(extack,
"Device doesn't support restart from Bus Off");
return -EOPNOTSUPP;
@@ -461,7 +463,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
return -EBUSY;
- priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
+ priv->restart_ms = restart_ms;
}
if (data[IFLA_CAN_RESTART]) {
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
index c80032bc1a52..73e66f9a3781 100644
--- a/drivers/net/can/esd/esdacc.c
+++ b/drivers/net/can/esd/esdacc.c
@@ -254,7 +254,7 @@ netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u32 acc_id;
u32 acc_dlc;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* Access core->tx_fifo_tail only once because it may be changed
diff --git a/drivers/net/can/rockchip/rockchip_canfd-tx.c b/drivers/net/can/rockchip/rockchip_canfd-tx.c
index 865a15e033a9..12200dcfd338 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-tx.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-tx.c
@@ -72,7 +72,7 @@ netdev_tx_t rkcanfd_start_xmit(struct sk_buff *skb, struct net_device *ndev)
int err;
u8 i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (!netif_subqueue_maybe_stop(priv->ndev, 0,
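The three CAN drivers above switch from can_dropped_invalid_skb() to can_dev_dropped_skb(), which additionally drops frames submitted while the interface is in listen-only mode. A minimal sketch of the usual start of a CAN ndo_start_xmit (hypothetical driver):

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
    	/* drops invalid frames and, unlike can_dropped_invalid_skb(),
    	 * also frames queued while the device is in listen-only mode */
    	if (can_dev_dropped_skb(ndev, skb))
    		return NETDEV_TX_OK;

    	/* ... hand the frame to the controller ... */
    	return NETDEV_TX_OK;
    }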
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 7077d705e471..6e4f17142519 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -733,7 +733,7 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
u64 tfc_vlan_tag = 0;
if (np->link_status == 0) { /* Link Down */
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
entry = np->cur_tx % TX_RING_SIZE;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index c96d1d6ba8fe..18d86badd6ea 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1077,8 +1077,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
- aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
- DPAA2_ETH_TX_BUF_ALIGN);
+ aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
else
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index aae462a0cf5a..0535e92404e3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1595,6 +1595,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd;
struct sk_buff *skb;
@@ -1630,7 +1632,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_byte_cnt += skb->len + ETH_HLEN;
rx_frm_cnt++;
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
}
rx_ring->next_to_clean = i;
@@ -1638,6 +1642,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
@@ -1947,6 +1953,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
/* next descriptor to process */
i = rx_ring->next_to_clean;
+ enetc_lock_mdio();
+
while (likely(rx_frm_cnt < work_limit)) {
union enetc_rx_bd *rxbd, *orig_rxbd;
struct xdp_buff xdp_buff;
@@ -2010,7 +2018,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
*/
enetc_bulk_flip_buff(rx_ring, orig_i, i);
+ enetc_unlock_mdio();
napi_gro_receive(napi, skb);
+ enetc_lock_mdio();
break;
case XDP_TX:
tx_ring = priv->xdp_tx_ring[rx_ring->index];
@@ -2045,7 +2055,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
}
break;
case XDP_REDIRECT:
+ enetc_unlock_mdio();
err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
+ enetc_lock_mdio();
if (unlikely(err)) {
enetc_xdp_drop(rx_ring, orig_i, i);
rx_ring->stats.xdp_redirect_failures++;
@@ -2065,8 +2077,11 @@ out:
rx_ring->stats.packets += rx_frm_cnt;
rx_ring->stats.bytes += rx_byte_cnt;
- if (xdp_redirect_frm_cnt)
+ if (xdp_redirect_frm_cnt) {
+ enetc_unlock_mdio();
xdp_do_flush();
+ enetc_lock_mdio();
+ }
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
@@ -2075,6 +2090,8 @@ out:
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
rx_ring->xdp.xdp_tx_in_flight);
+ enetc_unlock_mdio();
+
return rx_frm_cnt;
}
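In the enetc hunks the MDIO lock is taken once around the whole RX ring walk and released only around the calls that leave the driver (napi_gro_receive(), xdp_do_redirect(), xdp_do_flush()), instead of being toggled around each register access. A bracketing sketch of that pattern, with hypothetical loop helpers:

    enetc_lock_mdio();
    while (more_rx_work()) {		/* hypothetical condition */
    	process_rx_descriptor();	/* register access under the lock */

    	enetc_unlock_mdio();
    	napi_gro_receive(napi, skb);	/* upcall into the stack */
    	enetc_lock_mdio();
    }
    enetc_unlock_mdio();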
@@ -2093,6 +2110,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
for (i = 0; i < v->count_tx_rings; i++)
if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
complete = false;
+ enetc_unlock_mdio();
prog = rx_ring->xdp.prog;
if (prog)
@@ -2104,10 +2122,8 @@ static int enetc_poll(struct napi_struct *napi, int budget)
if (work_done)
v->rx_napi_work = true;
- if (!complete) {
- enetc_unlock_mdio();
+ if (!complete)
return budget;
- }
napi_complete_done(napi, work_done);
@@ -2116,6 +2132,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
v->rx_napi_work = false;
+ enetc_lock_mdio();
/* enable interrupts */
enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 0ec010a7d640..f279fa597991 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -76,7 +76,7 @@ struct enetc_lso_t {
#define ENETC_LSO_MAX_DATA_LEN SZ_256K
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
-#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */
+#define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1)
#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE \
(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 65302c41bfb1..38875c196cb6 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -148,6 +148,7 @@ config HIBMCGE
tristate "Hisilicon BMC Gigabit Ethernet Device Support"
depends on PCI && PCI_MSI
select PHYLIB
+ select FIXED_PHY
select MOTORCOMM_PHY
select REALTEK_PHY
help
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3692298e10f2..c9bdee9a8b30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -100,7 +100,7 @@ u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
return sizeof(struct mlx5_ksm) * 4;
}
WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
- return 0;
+ return 1;
}
u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 5d7c15abfcaf..f8eaaf37963b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -342,6 +342,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
struct mlx5e_priv *master_priv);
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event);
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -387,6 +388,10 @@ static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *sl
static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
}
+
+static inline void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+}
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index bf1d2769d4f1..feef86fff4bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -2893,9 +2893,30 @@ void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
- if (!priv->ipsec)
- return; /* IPsec not supported */
+ if (!priv->ipsec || mlx5_devcom_comp_get_size(priv->devcom) < 2)
+ return; /* IPsec not supported or no peers */
mlx5_devcom_send_event(priv->devcom, event, event, priv);
wait_for_completion(&priv->ipsec->comp);
}
+
+void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv)
+{
+ struct mlx5_devcom_comp_dev *tmp = NULL;
+ struct mlx5e_priv *peer_priv;
+
+ if (!priv->devcom)
+ return;
+
+ if (!mlx5_devcom_for_each_peer_begin(priv->devcom))
+ goto out;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp);
+ if (peer_priv)
+ complete_all(&peer_priv->ipsec->comp);
+
+ mlx5_devcom_for_each_peer_end(priv->devcom);
+out:
+ mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a56825921c23..9c46511e7b43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -242,8 +242,8 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
&attr,
mlx5e_devcom_event_mpv,
priv);
- if (IS_ERR(priv->devcom))
- return PTR_ERR(priv->devcom);
+ if (!priv->devcom)
+ return -EINVAL;
if (mlx5_core_is_mp_master(priv->mdev)) {
mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
@@ -256,7 +256,7 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
{
- if (IS_ERR_OR_NULL(priv->devcom))
+ if (!priv->devcom)
return;
if (mlx5_core_is_mp_master(priv->mdev)) {
@@ -266,6 +266,7 @@ static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
}
mlx5_devcom_unregister_component(priv->devcom);
+ priv->devcom = NULL;
}
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -6120,6 +6121,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
+ mlx5e_ipsec_disable_events(priv);
mlx5e_disable_blocking_events(priv);
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 263d5628ee44..1c79adc51a04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1794,14 +1794,27 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- struct mlx5e_wqe_frag_info *pwi;
+ if (prog) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+
+ if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
+ rq->flags)) {
+ struct mlx5e_wqe_frag_info *pwi;
+
+ wi -= old_nr_frags - sinfo->nr_frags;
+
+ for (pwi = head_wi; pwi < wi; pwi++)
+ pwi->frag_page->frags++;
+ }
+ return NULL; /* page/packet was consumed by XDP */
+ }
- for (pwi = head_wi; pwi < wi; pwi++)
- pwi->frag_page->frags++;
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ wi -= nr_frags_free;
+ truesize -= nr_frags_free * frag_info->frag_stride;
}
- return NULL; /* page/packet was consumed by XDP */
}
skb = mlx5e_build_linear_skb(
@@ -2027,6 +2040,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
u32 byte_cnt = cqe_bcnt;
struct skb_shared_info *sinfo;
unsigned int truesize = 0;
+ u32 pg_consumed_bytes;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 linear_frame_sz;
@@ -2080,7 +2094,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
while (byte_cnt) {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
- u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+ pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
truesize += pg_consumed_bytes;
@@ -2096,10 +2111,15 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
}
if (prog) {
+ u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
+ u32 len;
+
if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_frag_page *pfp;
+ frag_page -= old_nr_frags - sinfo->nr_frags;
+
for (pfp = head_page; pfp < frag_page; pfp++)
pfp->frags++;
@@ -2110,9 +2130,19 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL; /* page/packet was consumed by XDP */
}
+ nr_frags_free = old_nr_frags - sinfo->nr_frags;
+ if (unlikely(nr_frags_free)) {
+ frag_page -= nr_frags_free;
+ truesize -= (nr_frags_free - 1) * PAGE_SIZE +
+ ALIGN(pg_consumed_bytes,
+ BIT(rq->mpwqe.log_stride_sz));
+ }
+
+ len = mxbuf->xdp.data_end - mxbuf->xdp.data;
+
skb = mlx5e_build_linear_skb(
rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
- mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
+ mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
mxbuf->xdp.data - mxbuf->xdp.data_meta);
if (unlikely(!skb)) {
mlx5e_page_release_fragmented(rq->page_pool,
@@ -2137,8 +2167,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
do
pagep->frags++;
while (++pagep < frag_page);
+
+ headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
+ skb->data_len);
+ __pskb_pull_tail(skb, headlen);
}
- __pskb_pull_tail(skb, headlen);
} else {
dma_addr_t addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 7c029a7d0fd7..a2802cfc9b98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1614,7 +1614,9 @@ void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
fec_set_corrected_bits_total(priv, fec_stats);
fec_set_block_stats(priv, mode, fec_stats);
- fec_set_histograms_stats(priv, mode, hist);
+
+ if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr))
+ fec_set_histograms_stats(priv, mode, hist);
}
#define PPORT_ETH_EXT_OFF(c) \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index b7227afcb51d..2702b3885f06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -256,7 +256,7 @@ mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 mode;
#ifdef CONFIG_MLX5_EN_TLS
- if (accel && accel->tls.tls_tisn)
+ if (accel->tls.tls_tisn)
return MLX5_INLINE_MODE_TCP_UDP;
#endif
@@ -982,6 +982,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_attr attr;
struct mlx5i_tx_wqe *wqe;
+ struct mlx5e_accel_tx_state accel = {};
struct mlx5_wqe_datagram_seg *datagram;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_eth_seg *eseg;
@@ -992,7 +993,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
int num_dma;
u16 pi;
- mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
@@ -1009,7 +1010,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
- mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, &accel, eseg);
eseg->mss = attr.mss;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 4cf995be127d..34749814f19b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3129,7 +3129,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
attr,
mlx5_esw_offloads_devcom_event,
esw);
- if (IS_ERR(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
@@ -3140,7 +3140,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
- if (IS_ERR_OR_NULL(esw->devcom))
+ if (!esw->devcom)
return;
mlx5_devcom_send_event(esw->devcom,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 59c00c911275..3db0387bf6dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1430,11 +1430,10 @@ static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev)
mlx5_devcom_register_component(dev->priv.devc,
MLX5_DEVCOM_HCA_PORTS,
&attr, NULL, dev);
- if (IS_ERR(dev->priv.hca_devcom_comp)) {
+ if (!dev->priv.hca_devcom_comp) {
mlx5_core_err(dev,
- "Failed to register devcom HCA component, err: %ld\n",
- PTR_ERR(dev->priv.hca_devcom_comp));
- return PTR_ERR(dev->priv.hca_devcom_comp);
+ "Failed to register devcom HCA component.");
+ return -EINVAL;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index d0ba83d77cd1..29e7fa09c32c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -1444,7 +1444,7 @@ static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
compd = mlx5_devcom_register_component(mdev->priv.devc,
MLX5_DEVCOM_SHARED_CLOCK,
&attr, NULL, mdev);
- if (IS_ERR(compd))
+ if (!compd)
return;
mdev->clock_state->compdev = compd;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index faa2833602c8..e749618229bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -76,20 +76,18 @@ mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
struct mlx5_devcom_dev *
mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
- struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_dev *devc = NULL;
mutex_lock(&dev_list_lock);
if (devcom_dev_exists(dev)) {
- devc = ERR_PTR(-EEXIST);
+ mlx5_core_err(dev, "devcom device already exists");
goto out;
}
devc = mlx5_devcom_dev_alloc(dev);
- if (!devc) {
- devc = ERR_PTR(-ENOMEM);
+ if (!devc)
goto out;
- }
list_add_tail(&devc->list, &devcom_dev_list);
out:
@@ -110,8 +108,10 @@ mlx5_devcom_dev_release(struct kref *ref)
void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
{
- if (!IS_ERR_OR_NULL(devc))
- kref_put(&devc->ref, mlx5_devcom_dev_release);
+ if (!devc)
+ return;
+
+ kref_put(&devc->ref, mlx5_devcom_dev_release);
}
static struct mlx5_devcom_comp *
@@ -122,7 +122,7 @@ mlx5_devcom_comp_alloc(u64 id, const struct mlx5_devcom_match_attr *attr,
comp = kzalloc(sizeof(*comp), GFP_KERNEL);
if (!comp)
- return ERR_PTR(-ENOMEM);
+ return NULL;
comp->id = id;
comp->key.key = attr->key;
@@ -160,7 +160,7 @@ devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
if (!devcom)
- return ERR_PTR(-ENOMEM);
+ return NULL;
kref_get(&devc->ref);
devcom->devc = devc;
@@ -240,31 +240,28 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
mlx5_devcom_event_handler_t handler,
void *data)
{
- struct mlx5_devcom_comp_dev *devcom;
+ struct mlx5_devcom_comp_dev *devcom = NULL;
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devc))
- return ERR_PTR(-EINVAL);
+ if (!devc)
+ return NULL;
mutex_lock(&comp_list_lock);
comp = devcom_component_get(devc, id, attr, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_PTR(-EINVAL);
+ if (IS_ERR(comp))
goto out_unlock;
- }
if (!comp) {
comp = mlx5_devcom_comp_alloc(id, attr, handler);
- if (IS_ERR(comp)) {
- devcom = ERR_CAST(comp);
+ if (!comp)
goto out_unlock;
- }
+
list_add_tail(&comp->comp_list, &devcom_comp_list);
}
mutex_unlock(&comp_list_lock);
devcom = devcom_alloc_comp_dev(devc, comp, data);
- if (IS_ERR(devcom))
+ if (!devcom)
kref_put(&comp->ref, mlx5_devcom_comp_release);
return devcom;
@@ -276,8 +273,10 @@ out_unlock:
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
- if (!IS_ERR_OR_NULL(devcom))
- devcom_free_comp_dev(devcom);
+ if (!devcom)
+ return;
+
+ devcom_free_comp_dev(devcom);
}
int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
@@ -296,7 +295,7 @@ int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
int err = 0;
void *data;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return -ENODEV;
comp = devcom->comp;
@@ -338,7 +337,7 @@ void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
return READ_ONCE(devcom->comp->ready);
@@ -348,7 +347,7 @@ bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
{
struct mlx5_devcom_comp *comp;
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return false;
comp = devcom->comp;
@@ -421,21 +420,21 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
down_write(&devcom->comp->sem);
}
void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return;
up_write(&devcom->comp->sem);
}
int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
{
- if (IS_ERR_OR_NULL(devcom))
+ if (!devcom)
return 0;
return down_write_trylock(&devcom->comp->sem);
}
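
The devcom API above now reports failure as plain NULL instead of ERR_PTR() encodings, so callers only need a NULL check and pick their own errno. A minimal caller sketch under that convention (my_register() is illustrative, not part of the patch); it mirrors the updated callers in lag.c and lib/sd.c:

    /* Illustrative caller of the NULL-returning devcom API. */
    static int my_register(struct mlx5_core_dev *dev,
                           const struct mlx5_devcom_match_attr *attr)
    {
            struct mlx5_devcom_comp_dev *comp;

            comp = mlx5_devcom_register_component(dev->priv.devc,
                                                   MLX5_DEVCOM_HCA_PORTS,
                                                   attr, NULL, dev);
            if (!comp)                  /* no IS_ERR()/PTR_ERR() needed */
                    return -EINVAL;     /* caller chooses the errno */

            /* ... use comp ... */
            mlx5_devcom_unregister_component(comp);
            return 0;
    }
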
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index f5c2701f6e87..8e17daae48af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -221,8 +221,8 @@ static int sd_register(struct mlx5_core_dev *dev)
attr.net = mlx5_core_net(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
&attr, NULL, dev);
- if (IS_ERR(devcom))
- return PTR_ERR(devcom);
+ if (!devcom)
+ return -EINVAL;
sd->devcom = devcom;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index df93625c9dfa..70c156591b0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -978,9 +978,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
int err;
dev->priv.devc = mlx5_devcom_register_device(dev);
- if (IS_ERR(dev->priv.devc))
- mlx5_core_warn(dev, "failed to register devcom device %pe\n",
- dev->priv.devc);
+ if (!dev->priv.devc)
+ mlx5_core_warn(dev, "failed to register devcom device\n");
err = mlx5_query_board_id(dev);
if (err) {
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 9d3bd65b85ff..e2d7ce1a85e8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2211,15 +2211,35 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
}
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
+
if (num_tx_desc > 1) {
desc->die_dt = DT_FEND;
desc--;
+ /* When using multi-descriptors, DT_FEND needs to get written
+ * before DT_FSTART, but the compiler may reorder the memory
+ * writes in an attempt to optimize the code.
+ * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART
+ * are written exactly in the order shown in the code.
+ * This is particularly important for cases where the DMA engine
+ * is already running when we are running this code. If the DMA
+ * sees DT_FSTART without the corresponding DT_FEND it will enter
+ * an error condition.
+ */
+ dma_wmb();
desc->die_dt = DT_FSTART;
} else {
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
desc->die_dt = DT_FSINGLE;
}
+
+ /* Before ringing the doorbell we need to make sure that the latest
+ * writes have been committed to memory, otherwise it could delay
+ * things until the doorbell is rung again.
+ * This replaces the read operation mentioned in the HW
+ * manuals.
+ */
+ dma_wmb();
ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
priv->cur_tx[q] += num_tx_desc;
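
Condensed, the ordering these comments enforce is: descriptor payload first, DT_FEND before DT_FSTART, and everything before the doorbell, with a dma_wmb() at each boundary:

    /* Condensed ordering sketch of the hunk above, not the full xmit path. */
    if (num_tx_desc > 1) {
            desc->die_dt = DT_FEND;         /* last descriptor first...    */
            desc--;
            dma_wmb();                      /* ...visible before DT_FSTART */
            desc->die_dt = DT_FSTART;
    } else {
            dma_wmb();                      /* payload before type         */
            desc->die_dt = DT_FSINGLE;
    }
    dma_wmb();                              /* descriptors before doorbell */
    ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
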
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 51ea0caf16c1..0786816e05f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1446,14 +1446,15 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
}
} else {
if (bsp_priv->clk_enabled) {
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) {
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, false);
+ }
+
clk_bulk_disable_unprepare(bsp_priv->num_clks,
bsp_priv->clks);
clk_disable_unprepare(bsp_priv->clk_phy);
- if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
- bsp_priv->ops->set_clock_selection(bsp_priv,
- bsp_priv->clock_input, false);
-
bsp_priv->clk_enabled = false;
}
}
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 59d6ab989c55..8ffbfaa3ab18 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -163,7 +163,9 @@ struct am65_cpts {
struct device_node *clk_mux_np;
struct clk *refclk;
u32 refclk_freq;
- struct list_head events;
+ /* separate lists to handle TX and RX timestamps independently */
+ struct list_head events_tx;
+ struct list_head events_rx;
struct list_head pool;
struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
spinlock_t lock; /* protects events lists*/
@@ -227,6 +229,24 @@ static void am65_cpts_disable(struct am65_cpts *cpts)
am65_cpts_write32(cpts, 0, int_enable);
}
+static int am65_cpts_purge_event_list(struct am65_cpts *cpts,
+ struct list_head *events)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+ return removed;
+}
+
static int am65_cpts_event_get_port(struct am65_cpts_event *event)
{
return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
@@ -239,20 +259,12 @@ static int am65_cpts_event_get_type(struct am65_cpts_event *event)
AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
}
-static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+static int am65_cpts_purge_events(struct am65_cpts *cpts)
{
- struct list_head *this, *next;
- struct am65_cpts_event *event;
int removed = 0;
- list_for_each_safe(this, next, &cpts->events) {
- event = list_entry(this, struct am65_cpts_event, list);
- if (time_after(jiffies, event->tmo)) {
- list_del_init(&event->list);
- list_add(&event->list, &cpts->pool);
- ++removed;
- }
- }
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx);
+ removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx);
if (removed)
dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
@@ -287,7 +299,7 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
struct am65_cpts_event, list);
if (!event) {
- if (am65_cpts_cpts_purge_events(cpts)) {
+ if (am65_cpts_purge_events(cpts)) {
dev_err(cpts->dev, "cpts: event pool empty\n");
ret = -1;
goto out;
@@ -306,11 +318,21 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
cpts->timestamp);
break;
case AM65_CPTS_EV_RX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_move_tail(&event->list, &cpts->events_rx);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ break;
case AM65_CPTS_EV_TX:
event->tmo = jiffies +
msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
- list_move_tail(&event->list, &cpts->events);
+ list_move_tail(&event->list, &cpts->events_tx);
dev_dbg(cpts->dev,
"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
@@ -828,7 +850,7 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
return found;
}
-static void am65_cpts_find_ts(struct am65_cpts *cpts)
+static void am65_cpts_find_tx_ts(struct am65_cpts *cpts)
{
struct am65_cpts_event *event;
struct list_head *this, *next;
@@ -837,7 +859,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
LIST_HEAD(events);
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_init(&cpts->events, &events);
+ list_splice_init(&cpts->events_tx, &events);
spin_unlock_irqrestore(&cpts->lock, flags);
list_for_each_safe(this, next, &events) {
@@ -850,7 +872,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts)
}
spin_lock_irqsave(&cpts->lock, flags);
- list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events, &cpts->events_tx);
list_splice_tail(&events_free, &cpts->pool);
spin_unlock_irqrestore(&cpts->lock, flags);
}
@@ -861,7 +883,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
unsigned long flags;
long delay = -1;
- am65_cpts_find_ts(cpts);
+ am65_cpts_find_tx_ts(cpts);
spin_lock_irqsave(&cpts->txq.lock, flags);
if (!skb_queue_empty(&cpts->txq))
@@ -905,7 +927,7 @@ static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
spin_lock_irqsave(&cpts->lock, flags);
__am65_cpts_fifo_read(cpts);
- list_for_each_safe(this, next, &cpts->events) {
+ list_for_each_safe(this, next, &cpts->events_rx) {
event = list_entry(this, struct am65_cpts_event, list);
if (time_after(jiffies, event->tmo)) {
list_move(&event->list, &cpts->pool);
@@ -1155,7 +1177,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
return ERR_PTR(ret);
mutex_init(&cpts->ptp_clk_lock);
- INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->events_tx);
+ INIT_LIST_HEAD(&cpts->events_rx);
INIT_LIST_HEAD(&cpts->pool);
spin_lock_init(&cpts->lock);
skb_queue_head_init(&cpts->txq);
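
With the events list split into events_tx and events_rx, stale-entry reclaim goes through the shared am65_cpts_purge_event_list() helper. A minimal sketch of purging both lists under the existing cpts->lock (the locking context here is an assumption based on the driver's spinlock usage):

    /* Illustrative reclaim of timed-out TX and RX events. */
    spin_lock_irqsave(&cpts->lock, flags);
    removed  = am65_cpts_purge_event_list(cpts, &cpts->events_tx);
    removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx);
    spin_unlock_irqrestore(&cpts->lock, flags);

    if (removed)
            dev_dbg(cpts->dev, "reclaimed %d stale events\n", removed);
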
diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
index 289f62c5d2c7..0d7f30360d87 100644
--- a/drivers/net/ovpn/tcp.c
+++ b/drivers/net/ovpn/tcp.c
@@ -560,16 +560,34 @@ static void ovpn_tcp_close(struct sock *sk, long timeout)
static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- __poll_t mask = datagram_poll(file, sock, wait);
+ struct sk_buff_head *queue = &sock->sk->sk_receive_queue;
struct ovpn_socket *ovpn_sock;
+ struct ovpn_peer *peer = NULL;
+ __poll_t mask;
rcu_read_lock();
ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
- if (ovpn_sock && ovpn_sock->peer &&
- !skb_queue_empty(&ovpn_sock->peer->tcp.user_queue))
- mask |= EPOLLIN | EPOLLRDNORM;
+ /* if we landed in this callback, we expect to have a
+ * meaningful state. The ovpn_socket lifecycle would
+ * prevent it otherwise.
+ */
+ if (WARN(!ovpn_sock || !ovpn_sock->peer,
+ "ovpn: null state in ovpn_tcp_poll!")) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ if (ovpn_peer_hold(ovpn_sock->peer)) {
+ peer = ovpn_sock->peer;
+ queue = &peer->tcp.user_queue;
+ }
rcu_read_unlock();
+ mask = datagram_poll_queue(file, sock, wait, queue);
+
+ if (peer)
+ ovpn_peer_put(peer);
+
return mask;
}
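
The reworked poll handler is the usual RCU-lookup-then-refcount pattern: resolve the socket's user data under rcu_read_lock(), take a peer reference before leaving the critical section, and only then touch the queue. Condensed (the WARN path is omitted):

    /* Condensed form of ovpn_tcp_poll() above. */
    rcu_read_lock();
    ovpn_sock = rcu_dereference_sk_user_data(sock->sk);
    if (ovpn_sock && ovpn_sock->peer && ovpn_peer_hold(ovpn_sock->peer)) {
            peer = ovpn_sock->peer;
            queue = &peer->tcp.user_queue;
    }
    rcu_read_unlock();

    mask = datagram_poll_queue(file, sock, wait, queue);
    if (peer)
            ovpn_peer_put(peer);
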
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 79ce3eb6752b..604b5de0c158 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -4262,6 +4262,8 @@ static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
{
struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
+ shared->phydev = phydev;
+
/* Initialise shared lock for clock*/
mutex_init(&shared->shared_lock);
@@ -4317,8 +4319,6 @@ static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
phydev_dbg(phydev, "successfully registered ptp clock\n");
- shared->phydev = phydev;
-
/* The EP.4 is shared between all the PHYs in the package and also it
* can be accessed by any of the PHYs
*/
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index a724b21b4fe7..16a347084293 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -154,7 +154,7 @@
#define RTL_8211FVD_PHYID 0x001cc878
#define RTL_8221B 0x001cc840
#define RTL_8221B_VB_CG 0x001cc849
-#define RTL_8221B_VN_CG 0x001cc84a
+#define RTL_8221B_VM_CG 0x001cc84a
#define RTL_8251B 0x001cc862
#define RTL_8261C 0x001cc890
@@ -1523,16 +1523,16 @@ static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev,
return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
}
-static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev,
+static int rtl8221b_vm_cg_c22_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, false);
}
-static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev,
+static int rtl8221b_vm_cg_c45_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, true);
}
static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev,
@@ -1879,7 +1879,7 @@ static struct phy_driver realtek_drvs[] = {
.suspend = genphy_c45_pma_suspend,
.resume = rtlgen_c45_resume,
}, {
- .match_phy_device = rtl8221b_vn_cg_c22_match_phy_device,
+ .match_phy_device = rtl8221b_vm_cg_c22_match_phy_device,
.name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
.probe = rtl822x_probe,
.get_features = rtl822x_get_features,
@@ -1892,8 +1892,8 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .match_phy_device = rtl8221b_vn_cg_c45_match_phy_device,
- .name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)",
+ .match_phy_device = rtl8221b_vm_cg_c45_match_phy_device,
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY (C45)",
.probe = rtl822x_probe,
.config_init = rtl822xb_config_init,
.get_rate_matching = rtl822xb_get_rate_matching,
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 92add3daadbb..278e6cb6f4d9 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -685,9 +685,16 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
rtl8150_t *dev = netdev_priv(netdev);
int count, res;
+ /* pad the frame and ensure terminating USB packet, datasheet 9.2.3 */
+ count = max(skb->len, ETH_ZLEN);
+ if (count % 64 == 0)
+ count++;
+ if (skb_padto(skb, count)) {
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
netif_stop_queue(netdev);
- count = (skb->len < 60) ? 60 : skb->len;
- count = (count & 0x3f) ? count : count + 1;
dev->tx_skb = skb;
usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2),
skb->data, count, write_bulk_callback, dev);
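
The new padding logic rounds short frames up to ETH_ZLEN (60) and then bumps any exact multiple of the 64-byte bulk packet size by one byte so the transfer always ends in a short, terminating USB packet. A few worked values (illustrative only):

    /* count for illustrative skb->len values:
     *   len =  42 -> max(42, 60)  =  60,  60 % 64 != 0 -> count =  60
     *   len =  64 -> max(64, 60)  =  64,  64 % 64 == 0 -> count =  65
     *   len = 128 -> max(128, 60) = 128, 128 % 64 == 0 -> count = 129
     */
    count = max(skb->len, ETH_ZLEN);
    if (count % 64 == 0)
            count++;        /* force a short (terminating) bulk packet */
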
diff --git a/drivers/nvmem/rcar-efuse.c b/drivers/nvmem/rcar-efuse.c
index f24bdb9cb5a7..d9a96a1d59c8 100644
--- a/drivers/nvmem/rcar-efuse.c
+++ b/drivers/nvmem/rcar-efuse.c
@@ -127,6 +127,7 @@ static const struct of_device_id rcar_fuse_match[] = {
{ .compatible = "renesas,r8a779h0-otp", .data = &rcar_fuse_v4m },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, rcar_fuse_match);
static struct platform_driver rcar_fuse_driver = {
.probe = rcar_fuse_probe,
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 65c3c23255b7..1cd93549d093 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -671,6 +671,36 @@ err:
}
}
+static int of_check_msi_parent(struct device_node *dev_node, struct device_node **msi_node)
+{
+ struct of_phandle_args msi_spec;
+ int ret;
+
+ /*
+ * An msi-parent phandle with a missing or == 0 #msi-cells
+ * property identifies a 1:1 ID translation mapping.
+ *
+ * Set the msi controller node if the firmware matches this
+ * condition.
+ */
+ ret = of_parse_phandle_with_optional_args(dev_node, "msi-parent", "#msi-cells",
+ 0, &msi_spec);
+ if (ret)
+ return ret;
+
+ if ((*msi_node && *msi_node != msi_spec.np) || msi_spec.args_count != 0)
+ ret = -EINVAL;
+
+ if (!ret) {
+ /* Return with a node reference held */
+ *msi_node = msi_spec.np;
+ return 0;
+ }
+ of_node_put(msi_spec.np);
+
+ return ret;
+}
+
/**
* of_msi_xlate - map a MSI ID and find relevant MSI controller node
* @dev: device for which the mapping is to be done.
@@ -678,7 +708,7 @@ err:
* @id_in: Device ID.
*
* Walk up the device hierarchy looking for devices with a "msi-map"
- * property. If found, apply the mapping to @id_in.
+ * or "msi-parent" property. If found, apply the mapping to @id_in.
* If @msi_np points to a non-NULL device node pointer, only entries targeting
* that node will be matched; if it points to a NULL value, it will receive the
* device node of the first matching target phandle, with a reference held.
@@ -692,14 +722,18 @@ u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in)
/*
* Walk up the device parent links looking for one with a
- * "msi-map" property.
+ * "msi-map" or an "msi-parent" property.
*/
- for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
+ for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) {
if (!of_map_id(parent_dev->of_node, id_in, "msi-map",
"msi-map-mask", msi_np, &id_out))
break;
+ if (!of_check_msi_parent(parent_dev->of_node, msi_np))
+ break;
+ }
return id_out;
}
+EXPORT_SYMBOL_GPL(of_msi_xlate);
/**
* of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
@@ -741,8 +775,10 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
of_for_each_phandle(&it, err, np, "msi-parent", "#msi-cells", 0) {
d = irq_find_matching_host(it.node, token);
- if (d)
+ if (d) {
+ of_node_put(it.node);
return d;
+ }
}
return NULL;
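
of_check_msi_parent() treats an "msi-parent" phandle whose "#msi-cells" is missing or zero as a 1:1 ID translation. A standalone sketch of the same lookup (np is a placeholder device node):

    /* Illustrative lookup mirroring of_check_msi_parent(). */
    struct of_phandle_args spec;

    if (!of_parse_phandle_with_optional_args(np, "msi-parent", "#msi-cells",
                                             0, &spec)) {
            if (spec.args_count == 0)
                    pr_debug("1:1 MSI mapping via %pOF\n", spec.np);
            of_node_put(spec.np);   /* the lookup returns a held reference */
    }
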
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 20c9333bcb1c..e92513c5bda5 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -23,6 +23,7 @@
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
+static struct pci_ops dw_pcie_ecam_ops;
static struct pci_ops dw_child_pcie_ops;
#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
@@ -471,9 +472,6 @@ static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *re
if (IS_ERR(pp->cfg))
return PTR_ERR(pp->cfg);
- pci->dbi_base = pp->cfg->win;
- pci->dbi_phys_addr = res->start;
-
return 0;
}
@@ -529,7 +527,7 @@ static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
if (ret)
return ret;
- pp->bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
+ pp->bridge->ops = &dw_pcie_ecam_ops;
pp->bridge->sysdata = pp->cfg;
pp->cfg->priv = pp;
} else {
@@ -842,12 +840,34 @@ void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
+static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct dw_pcie_rp *pp = cfg->priv;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int busn = bus->number;
+
+ if (busn > 0)
+ return pci_ecam_map_bus(bus, devfn, where);
+
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+
+ return pci->dbi_base + where;
+}
+
static struct pci_ops dw_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
};
+static struct pci_ops dw_pcie_ecam_ops = {
+ .map_bus = dw_pcie_ecam_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 805edbbfe7eb..6948824642dc 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -55,7 +55,6 @@
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
#define PARF_Q2A_FLUSH 0x1ac
#define PARF_LTSSM 0x1b0
-#define PARF_SLV_DBI_ELBI 0x1b4
#define PARF_INT_ALL_STATUS 0x224
#define PARF_INT_ALL_CLEAR 0x228
#define PARF_INT_ALL_MASK 0x22c
@@ -65,16 +64,6 @@
#define PARF_DBI_BASE_ADDR_V2_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
-#define PARF_BLOCK_SLV_AXI_WR_BASE 0x360
-#define PARF_BLOCK_SLV_AXI_WR_BASE_HI 0x364
-#define PARF_BLOCK_SLV_AXI_WR_LIMIT 0x368
-#define PARF_BLOCK_SLV_AXI_WR_LIMIT_HI 0x36c
-#define PARF_BLOCK_SLV_AXI_RD_BASE 0x370
-#define PARF_BLOCK_SLV_AXI_RD_BASE_HI 0x374
-#define PARF_BLOCK_SLV_AXI_RD_LIMIT 0x378
-#define PARF_BLOCK_SLV_AXI_RD_LIMIT_HI 0x37c
-#define PARF_ECAM_BASE 0x380
-#define PARF_ECAM_BASE_HI 0x384
#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
@@ -98,7 +87,6 @@
/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29)
-#define PCIE_ECAM_BLOCKER_EN BIT(26)
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
@@ -146,9 +134,6 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
-/* PARF_SLV_DBI_ELBI */
-#define SLV_DBI_ELBI_ADDR_BASE GENMASK(11, 0)
-
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_UP BIT(13)
#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
@@ -326,47 +311,6 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
qcom_perst_assert(pcie, false);
}
-static void qcom_pci_config_ecam(struct dw_pcie_rp *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct qcom_pcie *pcie = to_qcom_pcie(pci);
- u64 addr, addr_end;
- u32 val;
-
- writel_relaxed(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE);
- writel_relaxed(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI);
-
- /*
- * The only device on the root bus is a single Root Port. If we try to
- * access any devices other than Device/Function 00.0 on Bus 0, the TLP
- * will go outside of the controller to the PCI bus. But with CFG Shift
- * Feature (ECAM) enabled in iATU, there is no guarantee that the
- * response is going to be all F's. Hence, to make sure that the
- * requester gets all F's response for accesses other than the Root
- * Port, configure iATU to block the transactions starting from
- * function 1 of the root bus to the end of the root bus (i.e., from
- * dbi_base + 4KB to dbi_base + 1MB).
- */
- addr = pci->dbi_phys_addr + SZ_4K;
- writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE);
- writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI);
-
- writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE);
- writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI);
-
- addr_end = pci->dbi_phys_addr + SZ_1M - 1;
-
- writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT);
- writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI);
-
- writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT);
- writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI);
-
- val = readl_relaxed(pcie->parf + PARF_SYS_CTRL);
- val |= PCIE_ECAM_BLOCKER_EN;
- writel_relaxed(val, pcie->parf + PARF_SYS_CTRL);
-}
-
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
@@ -1320,7 +1264,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
- u16 offset;
int ret;
qcom_ep_reset_assert(pcie);
@@ -1329,17 +1272,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
- if (pp->ecam_enabled) {
- /*
- * Override ELBI when ECAM is enabled, as when ECAM is enabled,
- * ELBI moves under the 'config' space.
- */
- offset = FIELD_GET(SLV_DBI_ELBI_ADDR_BASE, readl(pcie->parf + PARF_SLV_DBI_ELBI));
- pci->elbi_base = pci->dbi_base + offset;
-
- qcom_pci_config_ecam(pp);
- }
-
ret = qcom_pcie_phy_power_on(pcie);
if (ret)
goto err_deinit;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 7cc8281e7011..79b965158473 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -243,8 +243,7 @@ struct pcie_link_state {
/* Clock PM state */
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
- u32 clkpm_default:1; /* Default Clock PM state by BIOS or
- override */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS */
u32 clkpm_disable:1; /* Clock PM disabled */
};
@@ -376,18 +375,6 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
pcie_set_clkpm_nocheck(link, enable);
}
-static void pcie_clkpm_override_default_link_state(struct pcie_link_state *link,
- int enabled)
-{
- struct pci_dev *pdev = link->downstream;
-
- /* For devicetree platforms, enable ClockPM by default */
- if (of_have_populated_dt() && !enabled) {
- link->clkpm_default = 1;
- pci_info(pdev, "ASPM: DT platform, enabling ClockPM\n");
- }
-}
-
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
int capable = 1, enabled = 1;
@@ -410,7 +397,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
- pcie_clkpm_override_default_link_state(link, enabled);
link->clkpm_capable = capable;
link->clkpm_disable = blacklist ? 1 : 0;
}
@@ -811,19 +797,17 @@ static void pcie_aspm_override_default_link_state(struct pcie_link_state *link)
struct pci_dev *pdev = link->downstream;
u32 override;
- /* For devicetree platforms, enable all ASPM states by default */
+ /* For devicetree platforms, enable L0s and L1 by default */
if (of_have_populated_dt()) {
- link->aspm_default = PCIE_LINK_STATE_ASPM_ALL;
+ if (link->aspm_support & PCIE_LINK_STATE_L0S)
+ link->aspm_default |= PCIE_LINK_STATE_L0S;
+ if (link->aspm_support & PCIE_LINK_STATE_L1)
+ link->aspm_default |= PCIE_LINK_STATE_L1;
override = link->aspm_default & ~link->aspm_enabled;
if (override)
- pci_info(pdev, "ASPM: DT platform, enabling%s%s%s%s%s%s%s\n",
- FLAG(override, L0S_UP, " L0s-up"),
- FLAG(override, L0S_DW, " L0s-dw"),
- FLAG(override, L1, " L1"),
- FLAG(override, L1_1, " ASPM-L1.1"),
- FLAG(override, L1_2, " ASPM-L1.2"),
- FLAG(override, L1_1_PCIPM, " PCI-PM-L1.1"),
- FLAG(override, L1_2_PCIPM, " PCI-PM-L1.2"));
+ pci_info(pdev, "ASPM: default states%s%s\n",
+ FLAG(override, L0S, " L0s"),
+ FLAG(override, L1, " L1"));
}
}
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 4776013e0764..16a2fd9fdd9b 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -2015,6 +2015,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE) {
/* Program crspace counters to count clock cycles using "count_clock" sysfs */
attr = &pmc->block[blk_num].attr_count_clock;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.mode = 0644;
attr->dev_attr.show = mlxbf_pmc_count_clock_show;
attr->dev_attr.store = mlxbf_pmc_count_clock_store;
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index 31f9643a6a3b..f417dcc9af35 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -210,6 +210,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &g_series_quirks,
},
{
+ .ident = "Dell Inc. G15 5530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5530"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
.ident = "Dell Inc. G16 7630",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1639,7 +1647,7 @@ static int wmax_wmi_probe(struct wmi_device *wdev, const void *context)
static int wmax_wmi_suspend(struct device *dev)
{
- if (awcc->hwmon)
+ if (awcc && awcc->hwmon)
awcc_hwmon_suspend(dev);
return 0;
@@ -1647,7 +1655,7 @@ static int wmax_wmi_suspend(struct device *dev)
static int wmax_wmi_resume(struct device *dev)
{
- if (awcc->hwmon)
+ if (awcc && awcc->hwmon)
awcc_hwmon_resume(dev);
return 0;
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 794ec6e71990..a5c363252986 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -2548,7 +2548,7 @@ ptp_ocp_sma_fb_init(struct ptp_ocp *bp)
for (i = 0; i < OCP_SMA_NUM; i++) {
bp->sma[i].fixed_fcn = true;
bp->sma[i].fixed_dir = true;
- bp->sma[1].dpll_prop.capabilities &=
+ bp->sma[i].dpll_prop.capabilities &=
~DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE;
}
return;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 16d0f02af1e4..31d08c115521 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -503,7 +503,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
host_bcode = FC_ERROR;
goto err;
}
- if (offset + len > fsp->data_len) {
+ if (size_add(offset, len) > fsp->data_len) {
/* this should never happen */
if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
fc_frame_crc_check(fp))
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a761c0aa5127..83ff66f954e6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4104,7 +4104,7 @@ void qla4xxx_srb_compl(struct kref *ref)
* The mid-level driver tries to ensure that queuecommand never gets
* invoked concurrently with itself or the interrupt handler (although
* the interrupt handler may call this routine as part of request-
- * completion handling). Unfortunely, it sometimes calls the scheduler
+ * completion handling). Unfortunately, it sometimes calls the scheduler
* in interrupt context which is a big NO! NO!.
**/
static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
@@ -4647,7 +4647,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
cmd = scsi_host_find_tag(ha->host, index);
/*
* We cannot just check if the index is valid,
- * becase if we are run from the scsi eh, then
+ * because if we are run from the scsi eh, then
* the scsi/block layer is going to prevent
* the tag from being released.
*/
@@ -4952,7 +4952,7 @@ recover_ha_init_adapter:
/* Upon successful firmware/chip reset, re-initialize the adapter */
if (status == QLA_SUCCESS) {
/* For ISP-4xxx, force function 1 to always initialize
- * before function 3 to prevent both funcions from
+ * before function 3 to prevent both functions from
* stepping on top of the other */
if (is_qla40XX(ha) && (ha->mac_index == 3))
ssleep(6);
@@ -6914,7 +6914,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry = NULL;
/* Create session object, with INVALID_ENTRY,
- * the targer_id would get set when we issue the login
+ * the target_id would get set when we issue the login
*/
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
cmds_max, sizeof(struct ddb_entry),
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 567f9cd29102..6e4112143c76 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1406,14 +1406,19 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
}
/*
- * Our channel array is sparsley populated and we
+ * Our channel array could be sparsely populated and we
* initiated I/O on a processor/hw-q that does not
* currently have a designated channel. Fix this.
* The strategy is simple:
- * I. Ensure NUMA locality
- * II. Distribute evenly (best effort)
+ * I. Prefer the channel associated with the current CPU
+ * II. Ensure NUMA locality
+ * III. Distribute evenly (best effort)
*/
+ /* Prefer the channel on the I/O issuing processor/hw-q */
+ if (cpumask_test_cpu(q_num, &stor_device->alloced_cpus))
+ return stor_device->stor_chns[q_num];
+
node_mask = cpumask_of_node(cpu_to_node(q_num));
num_channels = 0;
@@ -1469,59 +1474,48 @@ static int storvsc_do_io(struct hv_device *device,
/* See storvsc_change_target_cpu(). */
outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
if (outgoing_channel != NULL) {
- if (outgoing_channel->target_cpu == q_num) {
- /*
- * Ideally, we want to pick a different channel if
- * available on the same NUMA node.
- */
- node_mask = cpumask_of_node(cpu_to_node(q_num));
- for_each_cpu_wrap(tgt_cpu,
- &stor_device->alloced_cpus, q_num + 1) {
- if (!cpumask_test_cpu(tgt_cpu, node_mask))
- continue;
- if (tgt_cpu == q_num)
- continue;
- channel = READ_ONCE(
- stor_device->stor_chns[tgt_cpu]);
- if (channel == NULL)
- continue;
- if (hv_get_avail_to_write_percent(
- &channel->outbound)
- > ring_avail_percent_lowater) {
- outgoing_channel = channel;
- goto found_channel;
- }
- }
+ if (hv_get_avail_to_write_percent(&outgoing_channel->outbound)
+ > ring_avail_percent_lowater)
+ goto found_channel;
- /*
- * All the other channels on the same NUMA node are
- * busy. Try to use the channel on the current CPU
- */
- if (hv_get_avail_to_write_percent(
- &outgoing_channel->outbound)
- > ring_avail_percent_lowater)
+ /*
+ * Channel is busy, try to find a channel on the same NUMA node
+ */
+ node_mask = cpumask_of_node(cpu_to_node(q_num));
+ for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+ q_num + 1) {
+ if (!cpumask_test_cpu(tgt_cpu, node_mask))
+ continue;
+ channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+ if (!channel)
+ continue;
+ if (hv_get_avail_to_write_percent(&channel->outbound)
+ > ring_avail_percent_lowater) {
+ outgoing_channel = channel;
goto found_channel;
+ }
+ }
- /*
- * If we reach here, all the channels on the current
- * NUMA node are busy. Try to find a channel in
- * other NUMA nodes
- */
- for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
- if (cpumask_test_cpu(tgt_cpu, node_mask))
- continue;
- channel = READ_ONCE(
- stor_device->stor_chns[tgt_cpu]);
- if (channel == NULL)
- continue;
- if (hv_get_avail_to_write_percent(
- &channel->outbound)
- > ring_avail_percent_lowater) {
- outgoing_channel = channel;
- goto found_channel;
- }
+ /*
+ * If we reach here, all the channels on the current
+ * NUMA node are busy. Try to find a channel in
+ * all NUMA nodes
+ */
+ for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+ q_num + 1) {
+ channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+ if (!channel)
+ continue;
+ if (hv_get_avail_to_write_percent(&channel->outbound)
+ > ring_avail_percent_lowater) {
+ outgoing_channel = channel;
+ goto found_channel;
}
}
+ /*
+ * If we reach here, all the channels are busy. Use the
+ * original channel found.
+ */
} else {
spin_lock_irqsave(&stor_device->lock, flags);
outgoing_channel = stor_device->stor_chns[q_num];
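
The rewritten selection now tries, in priority order: the channel bound to the issuing CPU, any channel on the same NUMA node with ring headroom, any channel at all, and finally the original channel even if busy. A compressed sketch of that order (channel_has_room() and pick_first_with_room() are hypothetical helpers standing in for the ring_avail_percent_lowater checks):

    /* Priority order of the hunk above; helpers are hypothetical. */
    ch = READ_ONCE(stor_device->stor_chns[q_num]);
    if (ch && channel_has_room(ch))
            return ch;                               /* 1. issuing CPU    */
    ch = pick_first_with_room(stor_device, cpumask_of_node(cpu_to_node(q_num)));
    if (ch)
            return ch;                               /* 2. same NUMA node */
    ch = pick_first_with_room(stor_device, &stor_device->alloced_cpus);
    if (ch)
            return ch;                               /* 3. any node       */
    return READ_ONCE(stor_device->stor_chns[q_num]); /* 4. busy fallback  */
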
diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c
index dbe640986825..b78163eaed61 100644
--- a/drivers/spi/spi-airoha-snfi.c
+++ b/drivers/spi/spi-airoha-snfi.c
@@ -192,6 +192,14 @@
#define SPI_NAND_OP_RESET 0xff
#define SPI_NAND_OP_DIE_SELECT 0xc2
+/* SNAND FIFO commands */
+#define SNAND_FIFO_TX_BUSWIDTH_SINGLE 0x08
+#define SNAND_FIFO_TX_BUSWIDTH_DUAL 0x09
+#define SNAND_FIFO_TX_BUSWIDTH_QUAD 0x0a
+#define SNAND_FIFO_RX_BUSWIDTH_SINGLE 0x0c
+#define SNAND_FIFO_RX_BUSWIDTH_DUAL 0x0e
+#define SNAND_FIFO_RX_BUSWIDTH_QUAD 0x0f
+
#define SPI_NAND_CACHE_SIZE (SZ_4K + SZ_256)
#define SPI_MAX_TRANSFER_SIZE 511
@@ -387,10 +395,26 @@ static int airoha_snand_set_mode(struct airoha_snand_ctrl *as_ctrl,
return regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_DUMMY, 0);
}
-static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
- const u8 *data, int len)
+static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl,
+ const u8 *data, int len, int buswidth)
{
int i, data_len;
+ u8 cmd;
+
+ switch (buswidth) {
+ case 0:
+ case 1:
+ cmd = SNAND_FIFO_TX_BUSWIDTH_SINGLE;
+ break;
+ case 2:
+ cmd = SNAND_FIFO_TX_BUSWIDTH_DUAL;
+ break;
+ case 4:
+ cmd = SNAND_FIFO_TX_BUSWIDTH_QUAD;
+ break;
+ default:
+ return -EINVAL;
+ }
for (i = 0; i < len; i += data_len) {
int err;
@@ -409,16 +433,32 @@ static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd,
return 0;
}
-static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data,
- int len)
+static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl,
+ u8 *data, int len, int buswidth)
{
int i, data_len;
+ u8 cmd;
+
+ switch (buswidth) {
+ case 0:
+ case 1:
+ cmd = SNAND_FIFO_RX_BUSWIDTH_SINGLE;
+ break;
+ case 2:
+ cmd = SNAND_FIFO_RX_BUSWIDTH_DUAL;
+ break;
+ case 4:
+ cmd = SNAND_FIFO_RX_BUSWIDTH_QUAD;
+ break;
+ default:
+ return -EINVAL;
+ }
for (i = 0; i < len; i += data_len) {
int err;
data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
- err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len);
+ err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len);
if (err)
return err;
@@ -618,6 +658,10 @@ static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc)
if (desc->info.offset + desc->info.length > U32_MAX)
return -EINVAL;
+ /* continuous reading is not supported */
+ if (desc->info.length > SPI_NAND_CACHE_SIZE)
+ return -E2BIG;
+
if (!airoha_snand_supports_op(desc->mem, &desc->info.op_tmpl))
return -EOPNOTSUPP;
@@ -654,13 +698,13 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
err = airoha_snand_nfi_config(as_ctrl);
if (err)
- return err;
+ goto error_dma_mode_off;
dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE,
DMA_FROM_DEVICE);
err = dma_mapping_error(as_ctrl->dev, dma_addr);
if (err)
- return err;
+ goto error_dma_mode_off;
/* set dma addr */
err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR,
@@ -689,8 +733,9 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
if (err)
goto error_dma_unmap;
- /* set read addr */
- err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0);
+ /* set read addr: zero page offset + descriptor read offset */
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3,
+ desc->info.offset);
if (err)
goto error_dma_unmap;
@@ -760,6 +805,8 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
error_dma_unmap:
dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
DMA_FROM_DEVICE);
+error_dma_mode_off:
+ airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
return err;
}
@@ -824,7 +871,9 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
if (err)
goto error_dma_unmap;
- err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0);
+ /* set write addr: zero page offset + descriptor write offset */
+ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2,
+ desc->info.offset);
if (err)
goto error_dma_unmap;
@@ -892,18 +941,35 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
error_dma_unmap:
dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE,
DMA_TO_DEVICE);
+ airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
return err;
}
static int airoha_snand_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- u8 data[8], cmd, opcode = op->cmd.opcode;
struct airoha_snand_ctrl *as_ctrl;
+ int op_len, addr_len, dummy_len;
+ u8 buf[20], *data;
int i, err;
as_ctrl = spi_controller_get_devdata(mem->spi->controller);
+ op_len = op->cmd.nbytes;
+ addr_len = op->addr.nbytes;
+ dummy_len = op->dummy.nbytes;
+
+ if (op_len + dummy_len + addr_len > sizeof(buf))
+ return -EIO;
+
+ data = buf;
+ for (i = 0; i < op_len; i++)
+ *data++ = op->cmd.opcode >> (8 * (op_len - i - 1));
+ for (i = 0; i < addr_len; i++)
+ *data++ = op->addr.val >> (8 * (addr_len - i - 1));
+ for (i = 0; i < dummy_len; i++)
+ *data++ = 0xff;
+
/* switch to manual mode */
err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL);
if (err < 0)
@@ -914,40 +980,40 @@ static int airoha_snand_exec_op(struct spi_mem *mem,
return err;
/* opcode */
- err = airoha_snand_write_data(as_ctrl, 0x8, &opcode, sizeof(opcode));
+ data = buf;
+ err = airoha_snand_write_data(as_ctrl, data, op_len,
+ op->cmd.buswidth);
if (err)
return err;
/* addr part */
- cmd = opcode == SPI_NAND_OP_GET_FEATURE ? 0x11 : 0x8;
- put_unaligned_be64(op->addr.val, data);
-
- for (i = ARRAY_SIZE(data) - op->addr.nbytes;
- i < ARRAY_SIZE(data); i++) {
- err = airoha_snand_write_data(as_ctrl, cmd, &data[i],
- sizeof(data[0]));
+ data += op_len;
+ if (addr_len) {
+ err = airoha_snand_write_data(as_ctrl, data, addr_len,
+ op->addr.buswidth);
if (err)
return err;
}
/* dummy */
- data[0] = 0xff;
- for (i = 0; i < op->dummy.nbytes; i++) {
- err = airoha_snand_write_data(as_ctrl, 0x8, &data[0],
- sizeof(data[0]));
+ data += addr_len;
+ if (dummy_len) {
+ err = airoha_snand_write_data(as_ctrl, data, dummy_len,
+ op->dummy.buswidth);
if (err)
return err;
}
/* data */
- if (op->data.dir == SPI_MEM_DATA_IN) {
- err = airoha_snand_read_data(as_ctrl, op->data.buf.in,
- op->data.nbytes);
- if (err)
- return err;
- } else {
- err = airoha_snand_write_data(as_ctrl, 0x8, op->data.buf.out,
- op->data.nbytes);
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ err = airoha_snand_read_data(as_ctrl, op->data.buf.in,
+ op->data.nbytes,
+ op->data.buswidth);
+ else
+ err = airoha_snand_write_data(as_ctrl, op->data.buf.out,
+ op->data.nbytes,
+ op->data.buswidth);
if (err)
return err;
}
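
exec_op now serializes command, address and dummy bytes into a single staging buffer, most-significant byte first, and then pushes each phase through the TX FIFO at its own buswidth. A worked example of the byte extraction (values illustrative):

    /* op->addr.val = 0x0123, addr_len = 2:
     *   i = 0 -> 0x0123 >> (8 * 1) = 0x01
     *   i = 1 -> 0x0123 >> (8 * 0) = 0x23
     * i.e. big-endian on the wire, as the flash expects.
     */
    for (i = 0; i < addr_len; i++)
            *data++ = op->addr.val >> (8 * (addr_len - i - 1));
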
diff --git a/drivers/spi/spi-amlogic-spifc-a4.c b/drivers/spi/spi-amlogic-spifc-a4.c
index 4338d00e56a6..35a7c4965e11 100644
--- a/drivers/spi/spi-amlogic-spifc-a4.c
+++ b/drivers/spi/spi-amlogic-spifc-a4.c
@@ -286,7 +286,7 @@ static int aml_sfc_set_bus_width(struct aml_sfc *sfc, u8 buswidth, u32 mask)
for (i = 0; i <= LANE_MAX; i++) {
if (buswidth == 1 << i) {
- conf = i << __bf_shf(mask);
+ conf = i << __ffs(mask);
return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG,
mask, conf);
}
@@ -566,7 +566,7 @@ static int aml_sfc_raw_io_op(struct aml_sfc *sfc, const struct spi_mem_op *op)
if (!op->data.nbytes)
goto end_xfer;
- conf = (op->data.nbytes >> RAW_SIZE_BW) << __bf_shf(RAW_EXT_SIZE);
+ conf = (op->data.nbytes >> RAW_SIZE_BW) << __ffs(RAW_EXT_SIZE);
ret = regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, RAW_EXT_SIZE, conf);
if (ret)
goto err_out;
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 8fb13df8ff87..81017402bc56 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1995,7 +1995,7 @@ static int cqspi_probe(struct platform_device *pdev)
if (cqspi->use_direct_mode) {
ret = cqspi_request_mmap_dma(cqspi);
if (ret == -EPROBE_DEFER)
- goto probe_setup_failed;
+ goto probe_dma_failed;
}
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
@@ -2019,9 +2019,10 @@ static int cqspi_probe(struct platform_device *pdev)
return 0;
probe_setup_failed:
- cqspi_controller_enable(cqspi, 0);
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
pm_runtime_disable(dev);
+probe_dma_failed:
+ cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index f0f576fac77a..7a5197586919 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -358,7 +358,9 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
if (IS_ERR(dwsmmio->rstc))
return PTR_ERR(dwsmmio->rstc);
- reset_control_deassert(dwsmmio->rstc);
+ ret = reset_control_deassert(dwsmmio->rstc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to deassert resets\n");
dws->bus_num = pdev->id;
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index 4b63cb98df9c..7765fb27c37c 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -75,10 +75,12 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x4d23), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7723), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 13bbb2133507..1775ad39e633 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -132,6 +132,7 @@
#define FLCOMP_C0DEN_16M 0x05
#define FLCOMP_C0DEN_32M 0x06
#define FLCOMP_C0DEN_64M 0x07
+#define FLCOMP_C0DEN_128M 0x08
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@@ -1347,7 +1348,12 @@ static int intel_spi_read_desc(struct intel_spi *ispi)
case FLCOMP_C0DEN_64M:
ispi->chip0_size = SZ_64M;
break;
+ case FLCOMP_C0DEN_128M:
+ ispi->chip0_size = SZ_128M;
+ break;
default:
+ dev_warn(ispi->dev, "unsupported C0DEN: %#lx\n",
+ flcomp & FLCOMP_C0DEN_MASK);
return -EINVAL;
}
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index f9371f98a65b..b6c79e50d842 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -404,6 +404,10 @@ struct nxp_fspi {
#define FSPI_NEED_INIT BIT(0)
#define FSPI_DTR_MODE BIT(1)
int flags;
+ /* save the previous operation clock rate */
+ unsigned long pre_op_rate;
+ /* the max clock rate the FSPI outputs to the device */
+ unsigned long max_rate;
};
static inline int needs_ip_only(struct nxp_fspi *f)
@@ -685,10 +689,13 @@ static void nxp_fspi_select_rx_sample_clk_source(struct nxp_fspi *f,
* change the mode back to mode 0.
*/
reg = fspi_readl(f, f->iobase + FSPI_MCR0);
- if (op_is_dtr)
+ if (op_is_dtr) {
reg |= FSPI_MCR0_RXCLKSRC(3);
- else /*select mode 0 */
+ f->max_rate = 166000000;
+ } else { /*select mode 0 */
reg &= ~FSPI_MCR0_RXCLKSRC(3);
+ f->max_rate = 66000000;
+ }
fspi_writel(f, reg, f->iobase + FSPI_MCR0);
}
@@ -719,6 +726,12 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f)
0, POLL_TOUT, true);
if (ret)
dev_warn(f->dev, "DLL lock failed, please fix it!\n");
+
+ /*
+ * For ERR050272, the DLL lock status bit is not accurate;
+ * wait an extra 4us as a workaround.
+ */
+ udelay(4);
}
/*
@@ -780,11 +793,17 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
uint64_t size_kb;
/*
- * Return, if previously selected target device is same as current
- * requested target device. Also the DTR or STR mode do not change.
+ * Return when all of the following conditions are met:
+ * 1. the previously selected target device is the same as the
+ * currently requested one,
+ * 2. the DTR or STR mode does not change,
+ * 3. the previous operation's max rate equals the current one.
+ *
+ * Otherwise the controller needs to be re-configured.
*/
if ((f->selected == spi_get_chipselect(spi, 0)) &&
- (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr))
+ (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr) &&
+ (f->pre_op_rate == op->max_freq))
return;
/* Reset FLSHxxCR0 registers */
@@ -802,6 +821,7 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
dev_dbg(f->dev, "Target device [CS:%x] selected\n", spi_get_chipselect(spi, 0));
nxp_fspi_select_rx_sample_clk_source(f, op_is_dtr);
+ rate = min(f->max_rate, op->max_freq);
if (op_is_dtr) {
f->flags |= FSPI_DTR_MODE;
@@ -832,6 +852,8 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi,
else
nxp_fspi_dll_override(f);
+ f->pre_op_rate = op->max_freq;
+
f->selected = spi_get_chipselect(spi, 0);
}
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index 9eba5c0a60f2..b3c2b03b1153 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -704,7 +704,12 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_dma;
}
- sfc->dma_buffer = virt_to_phys(sfc->buffer);
+ sfc->dma_buffer = dma_map_single(dev, sfc->buffer,
+ sfc->max_iosize, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, sfc->dma_buffer)) {
+ ret = -ENOMEM;
+ goto err_dma_map;
+ }
}
ret = devm_spi_register_controller(dev, host);
@@ -715,6 +720,9 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
return 0;
err_register:
+ dma_unmap_single(dev, sfc->dma_buffer, sfc->max_iosize,
+ DMA_BIDIRECTIONAL);
+err_dma_map:
free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
err_dma:
pm_runtime_get_sync(dev);
@@ -736,6 +744,8 @@ static void rockchip_sfc_remove(struct platform_device *pdev)
struct spi_controller *host = sfc->host;
spi_unregister_controller(host);
+ dma_unmap_single(&pdev->dev, sfc->dma_buffer, sfc->max_iosize,
+ DMA_BIDIRECTIONAL);
free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize));
clk_disable_unprepare(sfc->clk);
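
Replacing virt_to_phys() with a real streaming mapping means the mapping must be checked at creation and unmapped symmetrically on both error and removal paths. A minimal sketch of that lifecycle (buf and len are placeholders):

    /* Illustrative streaming-DMA lifecycle. */
    dma_addr_t dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);

    if (dma_mapping_error(dev, dma))
            return -ENOMEM;

    /* ... hand dma to the controller ... */

    dma_unmap_single(dev, dma, len, DMA_BIDIRECTIONAL);  /* teardown */
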
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index 94bbb3b6576d..01a5bb43cd2d 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -182,10 +182,12 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
return retval;
#endif
- retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
- *bytes_written += num_bytes;
- if (retval < 0)
- return retval;
+ if (fifotransferlength > 0) {
+ retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
+ *bytes_written += num_bytes;
+ if (retval < 0)
+ return retval;
+ }
write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BOIE, IMR0);
for (i = 1; i < fifotransferlength;) {
@@ -217,7 +219,7 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
break;
}
write_byte(tms_priv, tms_priv->imr0_bits, IMR0);
- if (retval)
+ if (retval < 0)
return retval;
if (send_eoi) {
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
index 164dcfc3c9ef..f7bfb4a8e553 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
@@ -1517,6 +1517,11 @@ void fmh_gpib_detach(struct gpib_board *board)
resource_size(e_priv->gpib_iomem_res));
}
fmh_gpib_generic_detach(board);
+
+ if (board->dev) {
+ put_device(board->dev);
+ board->dev = NULL;
+ }
}
static int fmh_gpib_pci_attach_impl(struct gpib_board *board,
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
index 4dec87d12687..1f8412de9fa3 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
@@ -327,7 +327,10 @@ static void ni_usb_soft_update_status(struct gpib_board *board, unsigned int ni_
board->status &= ~clear_mask;
board->status &= ~ni_usb_ibsta_mask;
board->status |= ni_usb_ibsta & ni_usb_ibsta_mask;
- // FIXME should generate events on DTAS and DCAS
+ if (ni_usb_ibsta & DCAS)
+ push_gpib_event(board, EVENT_DEV_CLR);
+ if (ni_usb_ibsta & DTAS)
+ push_gpib_event(board, EVENT_DEV_TRG);
spin_lock_irqsave(&board->spinlock, flags);
/* remove set status bits from monitored set why ?***/
@@ -694,8 +697,12 @@ static int ni_usb_read(struct gpib_board *board, u8 *buffer, size_t length,
*/
break;
case NIUSB_ATN_STATE_ERROR:
- retval = -EIO;
- dev_err(&usb_dev->dev, "read when ATN set\n");
+ if (status.ibsta & DCAS) {
+ retval = -EINTR;
+ } else {
+ retval = -EIO;
+ dev_dbg(&usb_dev->dev, "read when ATN set stat: 0x%06x\n", status.ibsta);
+ }
break;
case NIUSB_ADDRESSING_ERROR:
retval = -EIO;
diff --git a/drivers/tee/qcomtee/Kconfig b/drivers/tee/qcomtee/Kconfig
index 927686abceb1..9f19dee08db4 100644
--- a/drivers/tee/qcomtee/Kconfig
+++ b/drivers/tee/qcomtee/Kconfig
@@ -2,6 +2,7 @@
# Qualcomm Trusted Execution Environment Configuration
config QCOMTEE
tristate "Qualcomm TEE Support"
+ depends on ARCH_QCOM || COMPILE_TEST
depends on !CPU_BIG_ENDIAN
select QCOM_SCM
select QCOM_TZMEM_MODE_SHMBRIDGE
diff --git a/drivers/tee/qcomtee/call.c b/drivers/tee/qcomtee/call.c
index cc17a48d0ab7..ac134452cc9c 100644
--- a/drivers/tee/qcomtee/call.c
+++ b/drivers/tee/qcomtee/call.c
@@ -308,7 +308,7 @@ out_failed:
}
/* Release any IO and OO objects not processed. */
- for (; u[i].type && i < num_params; i++) {
+ for (; i < num_params && u[i].type; i++) {
if (u[i].type == QCOMTEE_ARG_TYPE_OO ||
u[i].type == QCOMTEE_ARG_TYPE_IO)
qcomtee_object_put(u[i].o);
diff --git a/drivers/tee/qcomtee/core.c b/drivers/tee/qcomtee/core.c
index 783acc59cfa9..b6715ada7700 100644
--- a/drivers/tee/qcomtee/core.c
+++ b/drivers/tee/qcomtee/core.c
@@ -424,7 +424,7 @@ static int qcomtee_prepare_msg(struct qcomtee_object_invoke_ctx *oic,
if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR))
memcpy(msgptr, u[i].b.addr, u[i].b.size);
else if (copy_from_user(msgptr, u[i].b.uaddr, u[i].b.size))
- return -EINVAL;
+ return -EFAULT;
offset += qcomtee_msg_offset_align(u[i].b.size);
ib++;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a53ba04d9770..710ae4d40aec 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -635,7 +635,9 @@ static int dw8250_probe(struct platform_device *pdev)
if (IS_ERR(data->rst))
return PTR_ERR(data->rst);
- reset_control_deassert(data->rst);
+ err = reset_control_deassert(data->rst);
+ if (err)
+ return dev_err_probe(dev, err, "failed to deassert resets\n");
err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst);
if (err)
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 04a0cbab02c2..b9cc0b786ca6 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -40,6 +40,8 @@
#define PCI_DEVICE_ID_ACCESSIO_COM_4SM 0x10db
#define PCI_DEVICE_ID_ACCESSIO_COM_8SM 0x10ea
+#define PCI_DEVICE_ID_ADVANTECH_XR17V352 0x0018
+
#define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
#define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
#define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
@@ -1622,6 +1624,12 @@ static const struct exar8250_board pbn_fastcom35x_8 = {
.exit = pci_xr17v35x_exit,
};
+static const struct exar8250_board pbn_adv_XR17V352 = {
+ .num_ports = 2,
+ .setup = pci_xr17v35x_setup,
+ .exit = pci_xr17v35x_exit,
+};
+
static const struct exar8250_board pbn_exar_XR17V4358 = {
.num_ports = 12,
.setup = pci_xr17v35x_setup,
@@ -1696,6 +1704,9 @@ static const struct pci_device_id exar_pci_tbl[] = {
USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x),
USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x),
+ /* ADVANTECH devices */
+ EXAR_DEVICE(ADVANTECH, XR17V352, pbn_adv_XR17V352),
+
/* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */
EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x),
EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x),
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index b44de2ed7413..5875a7b9b4b1 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -435,6 +435,7 @@ static int __maybe_unused mtk8250_runtime_suspend(struct device *dev)
while
(serial_in(up, MTK_UART_DEBUG0));
+ clk_disable_unprepare(data->uart_clk);
clk_disable_unprepare(data->bus_clk);
return 0;
@@ -445,6 +446,7 @@ static int __maybe_unused mtk8250_runtime_resume(struct device *dev)
struct mtk8250_data *data = dev_get_drvdata(dev);
clk_prepare_enable(data->bus_clk);
+ clk_prepare_enable(data->uart_clk);
return 0;
}
@@ -475,13 +477,13 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
int dmacnt;
#endif
- data->uart_clk = devm_clk_get(&pdev->dev, "baud");
+ data->uart_clk = devm_clk_get_enabled(&pdev->dev, "baud");
if (IS_ERR(data->uart_clk)) {
/*
* For compatibility with older device trees try unnamed
* clk when no baud clk can be found.
*/
- data->uart_clk = devm_clk_get(&pdev->dev, NULL);
+ data->uart_clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(data->uart_clk)) {
dev_warn(&pdev->dev, "Can't get uart clock\n");
return PTR_ERR(data->uart_clk);
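Note: devm_clk_get_enabled() combines devm_clk_get() with clk_prepare_enable() and registers the matching disable/unprepare and put for automatic cleanup on driver detach, which pairs with the new uart_clk handling in the runtime-PM callbacks above. A hedged sketch of the lookup-with-fallback pattern used here:

    /* Prefer the named "baud" clock, fall back to an unnamed clock for
     * older device trees; the clock stays enabled until driver detach. */
    clk = devm_clk_get_enabled(&pdev->dev, "baud");
    if (IS_ERR(clk))
            clk = devm_clk_get_enabled(&pdev->dev, NULL);
    if (IS_ERR(clk))
            return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                 "Can't get uart clock\n");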
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 1a2c4c14f6aa..c7435595dce1 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -588,13 +588,6 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
div /= prescaler;
}
- /* Enable enhanced features */
- sc16is7xx_efr_lock(port);
- sc16is7xx_port_update(port, SC16IS7XX_EFR_REG,
- SC16IS7XX_EFR_ENABLE_BIT,
- SC16IS7XX_EFR_ENABLE_BIT);
- sc16is7xx_efr_unlock(port);
-
/* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_CLKSEL_BIT,
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 538b2f991609..62bb62b82cbe 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1014,16 +1014,18 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
struct sci_port *s = to_sci_port(port);
const struct plat_sci_reg *reg;
int copied = 0;
- u16 status;
+ u32 status;
- reg = sci_getreg(port, s->params->overrun_reg);
- if (!reg->size)
- return 0;
+ if (s->type != SCI_PORT_RSCI) {
+ reg = sci_getreg(port, s->params->overrun_reg);
+ if (!reg->size)
+ return 0;
+ }
- status = sci_serial_in(port, s->params->overrun_reg);
+ status = s->ops->read_reg(port, s->params->overrun_reg);
if (status & s->params->overrun_mask) {
status &= ~s->params->overrun_mask;
- sci_serial_out(port, s->params->overrun_reg, status);
+ s->ops->write_reg(port, s->params->overrun_reg, status);
port->icount.overrun++;
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index d9d8dc05b235..168707213ed9 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -3251,7 +3251,6 @@ static void cdns3_gadget_exit(struct cdns *cdns)
priv_dev = cdns->gadget_dev;
- pm_runtime_mark_last_busy(cdns->dev);
pm_runtime_put_autosuspend(cdns->dev);
usb_del_gadget(&priv_dev->gadget);
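Note: the same two-line change, dropping pm_runtime_mark_last_busy() before pm_runtime_put_autosuspend(), repeats across several drivers below. The removals assume that in this tree the autosuspend put helpers refresh the last-busy timestamp themselves, making the explicit call redundant; a sketch of the simplified pattern:

    /* Old pattern:
     *         pm_runtime_mark_last_busy(dev);
     *         pm_runtime_put_autosuspend(dev);
     * Assumed behaviour here: the put helper updates last-busy itself. */
    pm_runtime_put_autosuspend(dev);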
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 0252560cbc80..d37c29a253dd 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -1999,7 +1999,6 @@ static void cdnsp_gadget_exit(struct cdns *cdns)
struct cdnsp_device *pdev = cdns->gadget_dev;
devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
- pm_runtime_mark_last_busy(cdns->dev);
pm_runtime_put_autosuspend(cdns->dev);
usb_del_gadget(&pdev->gadget);
cdnsp_gadget_free_endpoints(pdev);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 694b4a8e4e1d..a6ce73dcc871 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1372,7 +1372,6 @@ static int ci_controller_resume(struct device *dev)
ci->in_lpm = false;
if (ci->wakeup_int) {
ci->wakeup_int = false;
- pm_runtime_mark_last_busy(ci->dev);
pm_runtime_put_autosuspend(ci->dev);
enable_irq(ci->irq);
if (ci_otg_is_fsm_mode(ci))
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index a093544482d5..929536dc96ec 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -629,7 +629,6 @@ int ci_otg_fsm_work(struct ci_hdrc *ci)
ci_otg_queue_work(ci);
}
} else if (ci->fsm.otg->state == OTG_STATE_A_HOST) {
- pm_runtime_mark_last_busy(ci->dev);
pm_runtime_put_autosuspend(ci->dev);
return 0;
}
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index b1418885707c..bb027d2bd700 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -1224,6 +1224,14 @@ static const struct usbmisc_ops imx7ulp_usbmisc_ops = {
.power_lost_check = usbmisc_imx7d_power_lost_check,
};
+static const struct usbmisc_ops imx94_usbmisc_ops = {
+ .init = usbmisc_imx7d_init,
+ .set_wakeup = usbmisc_imx95_set_wakeup,
+ .charger_detection = imx7d_charger_detection,
+ .power_lost_check = usbmisc_imx7d_power_lost_check,
+ .vbus_comparator_on = usbmisc_imx7d_vbus_comparator_on,
+};
+
static const struct usbmisc_ops imx95_usbmisc_ops = {
.init = usbmisc_imx7d_init,
.set_wakeup = usbmisc_imx95_set_wakeup,
@@ -1482,6 +1490,10 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
.data = &imx7ulp_usbmisc_ops,
},
{
+ .compatible = "fsl,imx94-usbmisc",
+ .data = &imx94_usbmisc_ops,
+ },
+ {
.compatible = "fsl,imx95-usbmisc",
.data = &imx95_usbmisc_ops,
},
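Note: the new fsl,imx94-usbmisc entry follows the existing pattern of attaching an ops table through of_device_id.data. The lookup itself is outside this hunk; a hedged sketch of how such a table is typically resolved at probe time:

    /* Illustrative only; the driver's actual lookup is not shown here. */
    const struct usbmisc_ops *ops = of_device_get_match_data(&pdev->dev);

    if (!ops)
            return -ENODEV;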
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 73f9476774ae..54be4aa1dcb2 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1475,7 +1475,7 @@ made_compressed_probe:
if (!acm->country_codes)
goto skip_countries;
acm->country_code_size = cfd->bLength - 4;
- memcpy(acm->country_codes, (u8 *)&cfd->wCountyCode0,
+ memcpy(acm->country_codes, cfd->wCountryCodes,
cfd->bLength - 4);
acm->country_rel_date = cfd->iCountryCodeRelDate;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 75de29725a45..206f1b738ed3 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1936,10 +1936,8 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
u8 *buffer = NULL;
int rv;
unsigned int is_in, pipe;
- unsigned long res;
- res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest));
- if (res)
+ if (copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest)))
return -EFAULT;
if (request.req.wLength > USBTMC_BUFSIZE)
@@ -1956,9 +1954,8 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
if (!is_in) {
/* Send control data to device */
- res = copy_from_user(buffer, request.data,
- request.req.wLength);
- if (res) {
+ if (copy_from_user(buffer, request.data,
+ request.req.wLength)) {
rv = -EFAULT;
goto exit;
}
@@ -1984,8 +1981,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
if (rv && is_in) {
/* Read control data from device */
- res = copy_to_user(request.data, buffer, rv);
- if (res)
+ if (copy_to_user(request.data, buffer, rv))
rv = -EFAULT;
}
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index 766000b4939e..60ea76160122 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -3,10 +3,13 @@
# Makefile for USB Core files and filesystem
#
+# define_trace.h needs to know how to find our header
+CFLAGS_trace.o := -I$(src)
+
usbcore-y := usb.o hub.o hcd.o urb.o message.o driver.o
usbcore-y += config.o file.o buffer.o sysfs.o endpoint.o
usbcore-y += devio.o notify.o generic.o quirks.o devices.o
-usbcore-y += phy.o port.o
+usbcore-y += phy.o port.o trace.o
usbcore-$(CONFIG_OF) += of.o
usbcore-$(CONFIG_USB_XHCI_SIDEBAND) += offload.o
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 9dd79769cad1..24feb0de1c00 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2696,18 +2696,18 @@ static void hcd_release(struct kref *kref)
kfree(hcd);
}
-struct usb_hcd *usb_get_hcd (struct usb_hcd *hcd)
+struct usb_hcd *usb_get_hcd(struct usb_hcd *hcd)
{
if (hcd)
- kref_get (&hcd->kref);
+ kref_get(&hcd->kref);
return hcd;
}
EXPORT_SYMBOL_GPL(usb_get_hcd);
-void usb_put_hcd (struct usb_hcd *hcd)
+void usb_put_hcd(struct usb_hcd *hcd)
{
if (hcd)
- kref_put (&hcd->kref, hcd_release);
+ kref_put(&hcd->kref, hcd_release);
}
EXPORT_SYMBOL_GPL(usb_put_hcd);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 256fe8c86828..be50d03034a9 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -28,6 +28,7 @@
#include <linux/usb/otg.h>
#include <linux/usb/quirks.h>
#include <linux/workqueue.h>
+#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/pm_qos.h>
@@ -40,6 +41,7 @@
#include "hub.h"
#include "phy.h"
#include "otg_productlist.h"
+#include "trace.h"
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
#define USB_VENDOR_SMSC 0x0424
@@ -277,10 +279,7 @@ static void usb_set_lpm_pel(struct usb_device *udev,
* device and the parent hub into U0. The exit latency is the bigger of
* the device exit latency or the hub exit latency.
*/
- if (udev_exit_latency > hub_exit_latency)
- first_link_pel = udev_exit_latency * 1000;
- else
- first_link_pel = hub_exit_latency * 1000;
+ first_link_pel = max(udev_exit_latency, hub_exit_latency) * 1000;
/*
* When the hub starts to receive the LFPS, there is a slight delay for
@@ -294,10 +293,7 @@ static void usb_set_lpm_pel(struct usb_device *udev,
* According to figure C-7 in the USB 3.0 spec, the PEL for this device
* is the greater of the two exit latencies.
*/
- if (first_link_pel > hub_pel)
- udev_lpm_params->pel = first_link_pel;
- else
- udev_lpm_params->pel = hub_pel;
+ udev_lpm_params->pel = max(first_link_pel, hub_pel);
}
/*
@@ -2147,6 +2143,21 @@ static void update_port_device_state(struct usb_device *udev)
}
}
+static void update_usb_device_state(struct usb_device *udev,
+ enum usb_device_state new_state)
+{
+ if (udev->state == USB_STATE_SUSPENDED &&
+ new_state != USB_STATE_SUSPENDED)
+ udev->active_duration -= jiffies;
+ else if (new_state == USB_STATE_SUSPENDED &&
+ udev->state != USB_STATE_SUSPENDED)
+ udev->active_duration += jiffies;
+
+ udev->state = new_state;
+ update_port_device_state(udev);
+ trace_usb_set_device_state(udev);
+}
+
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
@@ -2156,10 +2167,7 @@ static void recursively_mark_NOTATTACHED(struct usb_device *udev)
if (hub->ports[i]->child)
recursively_mark_NOTATTACHED(hub->ports[i]->child);
}
- if (udev->state == USB_STATE_SUSPENDED)
- udev->active_duration -= jiffies;
- udev->state = USB_STATE_NOTATTACHED;
- update_port_device_state(udev);
+ update_usb_device_state(udev, USB_STATE_NOTATTACHED);
}
/**
@@ -2209,14 +2217,7 @@ void usb_set_device_state(struct usb_device *udev,
else
wakeup = 0;
}
- if (udev->state == USB_STATE_SUSPENDED &&
- new_state != USB_STATE_SUSPENDED)
- udev->active_duration -= jiffies;
- else if (new_state == USB_STATE_SUSPENDED &&
- udev->state != USB_STATE_SUSPENDED)
- udev->active_duration += jiffies;
- udev->state = new_state;
- update_port_device_state(udev);
+ update_usb_device_state(udev, new_state);
} else
recursively_mark_NOTATTACHED(udev);
spin_unlock_irqrestore(&device_state_lock, flags);
@@ -6077,7 +6078,7 @@ int usb_hub_init(void)
* device was gone before the EHCI controller had handed its port
* over to the companion full-speed controller.
*/
- hub_wq = alloc_workqueue("usb_hub_wq", WQ_FREEZABLE, 0);
+ hub_wq = alloc_workqueue("usb_hub_wq", WQ_FREEZABLE | WQ_PERCPU, 0);
if (hub_wq)
return 0;
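Note: the open-coded comparisons in usb_set_lpm_pel() are replaced with max() from <linux/minmax.h> (hence the new include above). max() enforces that both operands have the same type at compile time; max_t() is the variant for mixed types. A small hedged sketch, not the hub code itself:

    #include <linux/minmax.h>

    /* Both operands are u32, so plain max() applies;
     * use max_t(u32, a, b) if the types differ. */
    static u32 first_link_pel_ns(u32 udev_exit_latency, u32 hub_exit_latency)
    {
            return max(udev_exit_latency, hub_exit_latency) * 1000;
    }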
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index d2b2787be409..6138468c67c4 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2431,7 +2431,7 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
break;
case USB_CDC_MBIM_EXTENDED_TYPE:
if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
- break;
+ goto next_desc;
hdr->usb_cdc_mbim_extended_desc =
(struct usb_cdc_mbim_extended_desc *)buffer;
break;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f5bc53875330..47f589c4104a 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -467,6 +467,8 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Huawei 4G LTE module */
{ USB_DEVICE(0x12d1, 0x15bb), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
+ { USB_DEVICE(0x12d1, 0x15c1), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
{ USB_DEVICE(0x12d1, 0x15c3), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
diff --git a/drivers/usb/core/trace.c b/drivers/usb/core/trace.c
new file mode 100644
index 000000000000..607bcf639d27
--- /dev/null
+++ b/drivers/usb/core/trace.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Google LLC
+ */
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/usb/core/trace.h b/drivers/usb/core/trace.h
new file mode 100644
index 000000000000..903e57dc273a
--- /dev/null
+++ b/drivers/usb/core/trace.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google LLC
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM usbcore
+
+#if !defined(_USB_CORE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _USB_CORE_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/usb.h>
+
+DECLARE_EVENT_CLASS(usb_core_log_usb_device,
+ TP_PROTO(struct usb_device *udev),
+ TP_ARGS(udev),
+ TP_STRUCT__entry(
+ __string(name, dev_name(&udev->dev))
+ __field(enum usb_device_speed, speed)
+ __field(enum usb_device_state, state)
+ __field(unsigned short, bus_mA)
+ __field(unsigned, authorized)
+ ),
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->speed = udev->speed;
+ __entry->state = udev->state;
+ __entry->bus_mA = udev->bus_mA;
+ __entry->authorized = udev->authorized;
+ ),
+ TP_printk("usb %s speed %s state %s %dmA [%s]",
+ __get_str(name),
+ usb_speed_string(__entry->speed),
+ usb_state_string(__entry->state),
+ __entry->bus_mA,
+ __entry->authorized ? "authorized" : "unauthorized")
+);
+
+DEFINE_EVENT(usb_core_log_usb_device, usb_set_device_state,
+ TP_PROTO(struct usb_device *udev),
+ TP_ARGS(udev)
+);
+
+DEFINE_EVENT(usb_core_log_usb_device, usb_alloc_dev,
+ TP_PROTO(struct usb_device *udev),
+ TP_ARGS(udev)
+);
+
+
+#endif /* _USB_CORE_TRACE_H */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index b6b0b8489523..e740f7852bcd 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -46,6 +46,7 @@
#include <linux/dma-mapping.h>
#include "hub.h"
+#include "trace.h"
const char *usbcore_name = "usbcore";
@@ -746,6 +747,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
#endif
dev->authorized = usb_dev_authorized(dev, usb_hcd);
+ trace_usb_alloc_dev(dev);
return dev;
}
EXPORT_SYMBOL_GPL(usb_alloc_dev);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 3f83ecc9fc23..ef0d73077034 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -369,11 +369,11 @@ static void dwc2_driver_shutdown(struct platform_device *dev)
{
struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
- dwc2_disable_global_interrupts(hsotg);
- synchronize_irq(hsotg->irq);
-
- if (hsotg->ll_hw_enabled)
+ if (hsotg->ll_hw_enabled) {
+ dwc2_disable_global_interrupts(hsotg);
+ synchronize_irq(hsotg->irq);
dwc2_lowlevel_hw_disable(hsotg);
+ }
}
/**
@@ -649,9 +649,13 @@ error:
static int __maybe_unused dwc2_suspend(struct device *dev)
{
struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
- bool is_device_mode = dwc2_is_device_mode(dwc2);
+ bool is_device_mode;
int ret = 0;
+ if (!dwc2->ll_hw_enabled)
+ return 0;
+
+ is_device_mode = dwc2_is_device_mode(dwc2);
if (is_device_mode)
dwc2_hsotg_suspend(dwc2);
@@ -728,6 +732,9 @@ static int __maybe_unused dwc2_resume(struct device *dev)
struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
int ret = 0;
+ if (!dwc2->ll_hw_enabled)
+ return 0;
+
if (dwc2->phy_off_for_suspend && dwc2->ll_hw_enabled) {
ret = __dwc2_lowlevel_hw_enable(dwc2);
if (ret)
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 4925d15084f8..bf3e04635131 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -200,4 +200,15 @@ config USB_DWC3_GENERIC_PLAT
the dwc3 child node in the device tree.
Say 'Y' or 'M' here if your platform integrates DWC3 in a similar way.
+config USB_DWC3_APPLE
+ tristate "Apple Silicon DWC3 Platform Driver"
+ depends on OF && ARCH_APPLE
+ default USB_DWC3
+ select USB_ROLE_SWITCH
+ help
+ Support Apple Silicon SoCs with DesignWare Core USB3 IP.
+ The DesignWare Core USB3 IP has to be used in dual-role
+ mode on these machines.
+ Say 'Y' or 'M' if you have such device.
+
endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 96469e48ff9d..89d46ab50068 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -43,6 +43,7 @@ endif
##
obj-$(CONFIG_USB_DWC3_AM62) += dwc3-am62.o
+obj-$(CONFIG_USB_DWC3_APPLE) += dwc3-apple.o
obj-$(CONFIG_USB_DWC3_OMAP) += dwc3-omap.o
obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o
obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ae140c356295..d7f340f22595 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -132,6 +132,7 @@ void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(i), reg);
}
}
+EXPORT_SYMBOL_GPL(dwc3_enable_susphy);
void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
{
@@ -158,6 +159,7 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
dwc->current_dr_role = mode;
trace_dwc3_set_prtcap(mode);
}
+EXPORT_SYMBOL_GPL(dwc3_set_prtcap);
static void __dwc3_set_mode(struct work_struct *work)
{
@@ -280,7 +282,6 @@ static void __dwc3_set_mode(struct work_struct *work)
}
out:
- pm_runtime_mark_last_busy(dwc->dev);
pm_runtime_put_autosuspend(dwc->dev);
mutex_unlock(&dwc->mutex);
}
@@ -975,7 +976,7 @@ static void dwc3_clk_disable(struct dwc3 *dwc)
clk_disable_unprepare(dwc->bus_clk);
}
-static void dwc3_core_exit(struct dwc3 *dwc)
+void dwc3_core_exit(struct dwc3 *dwc)
{
dwc3_event_buffers_cleanup(dwc);
dwc3_phy_power_off(dwc);
@@ -983,6 +984,7 @@ static void dwc3_core_exit(struct dwc3 *dwc)
dwc3_clk_disable(dwc);
reset_control_assert(dwc->reset);
}
+EXPORT_SYMBOL_GPL(dwc3_core_exit);
static bool dwc3_core_is_valid(struct dwc3 *dwc)
{
@@ -1328,7 +1330,7 @@ static void dwc3_config_threshold(struct dwc3 *dwc)
*
* Returns 0 on success otherwise negative errno.
*/
-static int dwc3_core_init(struct dwc3 *dwc)
+int dwc3_core_init(struct dwc3 *dwc)
{
unsigned int hw_mode;
u32 reg;
@@ -1528,6 +1530,7 @@ err_exit_ulpi:
return ret;
}
+EXPORT_SYMBOL_GPL(dwc3_core_init);
static int dwc3_core_get_phy(struct dwc3 *dwc)
{
@@ -1666,7 +1669,8 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
}
-static void dwc3_get_software_properties(struct dwc3 *dwc)
+static void dwc3_get_software_properties(struct dwc3 *dwc,
+ const struct dwc3_properties *properties)
{
struct device *tmpdev;
u16 gsbuscfg0_reqinfo;
@@ -1674,6 +1678,12 @@ static void dwc3_get_software_properties(struct dwc3 *dwc)
dwc->gsbuscfg0_reqinfo = DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED;
+ if (properties->gsbuscfg0_reqinfo !=
+ DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED) {
+ dwc->gsbuscfg0_reqinfo = properties->gsbuscfg0_reqinfo;
+ return;
+ }
+
/*
* Iterate over all parent nodes for finding swnode properties
* and non-DT (non-ABI) properties.
@@ -2206,7 +2216,7 @@ int dwc3_core_probe(const struct dwc3_probe_data *data)
dwc3_get_properties(dwc);
- dwc3_get_software_properties(dwc);
+ dwc3_get_software_properties(dwc, &data->properties);
dwc->usb_psy = dwc3_get_usb_power_supply(dwc);
if (IS_ERR(dwc->usb_psy))
@@ -2299,9 +2309,11 @@ int dwc3_core_probe(const struct dwc3_probe_data *data)
dwc3_check_params(dwc);
dwc3_debugfs_init(dwc);
- ret = dwc3_core_init_mode(dwc);
- if (ret)
- goto err_exit_debugfs;
+ if (!data->skip_core_init_mode) {
+ ret = dwc3_core_init_mode(dwc);
+ if (ret)
+ goto err_exit_debugfs;
+ }
pm_runtime_put(dev);
@@ -2356,6 +2368,7 @@ static int dwc3_probe(struct platform_device *pdev)
probe_data.dwc = dwc;
probe_data.res = res;
+ probe_data.properties = DWC3_DEFAULT_PROPERTIES;
return dwc3_core_probe(&probe_data);
}
@@ -2644,7 +2657,6 @@ int dwc3_runtime_idle(struct dwc3 *dwc)
break;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return 0;
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 4c91240eb429..589bbeb27454 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -515,6 +515,7 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
dwc3_role_switch.set = dwc3_usb_role_switch_set;
dwc3_role_switch.get = dwc3_usb_role_switch_get;
dwc3_role_switch.driver_data = dwc;
+ dwc3_role_switch.allow_userspace_control = true;
dwc->role_sw = usb_role_switch_register(dwc->dev, &dwc3_role_switch);
if (IS_ERR(dwc->role_sw))
return PTR_ERR(dwc->role_sw);
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index 9db8f3ca493d..e11d7643f966 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -292,7 +292,6 @@ static int dwc3_ti_probe(struct platform_device *pdev)
/* Setting up autosuspend */
pm_runtime_set_autosuspend_delay(dev, DWC3_AM62_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/usb/dwc3/dwc3-apple.c b/drivers/usb/dwc3/dwc3-apple.c
new file mode 100644
index 000000000000..cc47cad232e3
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-apple.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple Silicon DWC3 Glue driver
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Based on:
+ * - dwc3-qcom.c Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * - dwc3-of-simple.c Copyright (c) 2015 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "glue.h"
+
+/*
+ * This platform requires a very specific sequence of operations to bring up dwc3 and its USB3 PHY:
+ *
+ * 1) The PHY itself has to be brought up; for this we need to know the mode (USB3,
+ * USB3+DisplayPort, USB4, etc) and the lane orientation. This happens through typec_mux_set.
+ * 2) DWC3 has to be brought up but we must not touch the gadget area or start xhci yet.
+ * 3) The PHY bring-up has to be finalized and dwc3's PIPE interface has to be switched to the
+ * USB3 PHY, this is done inside phy_set_mode.
+ * 4) We can now initialize xhci or gadget mode.
+ *
+ * We can switch 1 and 2 but 3 has to happen after (1 and 2) and 4 has to happen after 3.
+ *
+ * And then to bring this all down again:
+ *
+ * 1) DWC3 has to exit host or gadget mode and must no longer touch those registers
+ * 2) The PHY has to switch dwc3's PIPE interface back to the dummy backend
+ * 3) The PHY itself can be shut down, this happens from typec_mux_set
+ *
+ * We also can't transition the PHY from one mode to another while dwc3 is up and running (this is
+ * slightly wrong, some transitions are possible, others aren't but because we have no documentation
+ * for this I'd rather play it safe).
+ *
+ * After both the PHY and dwc3 are initialized we will only ever see a single "new device connected"
+ * event. If we just keep them running only the first device plugged in will ever work. XHCI's port
+ * status register actually does show the correct state but no interrupt ever comes in. In gadget
+ * mode we don't even get a USBDisconnected event and everything looks like there's still something
+ * connected on the other end.
+ * This can be partially explained because the USB2 D+/D- lines are connected through a stateful
+ * eUSB2 repeater which in turn is controlled by a variant of the TI TPS6598x USB PD chip which
+ * resets the repeater out-of-band everytime the CC lines are (dis)connected. This then requires a
+ * PHY reset to make sure the PHY and the eUSB2 repeater state are synchronized again.
+ *
+ * And to make this all extra fun: If we get the order of some of this wrong either the port is just
+ * broken until a phy+dwc3 reset, or it's broken until a full SoC reset (likely because we can't
+ * reset some parts of the PHY), or some watchdog kicks in after a few seconds and forces a full SoC
+ * reset (mostly seen this with USB4/Thunderbolt but there's clearly some watchdog that hates
+ * invalid states).
+ *
+ * Hence there's really no good way to keep dwc3 fully up and running after we disconnect a cable
+ * because then we can't shut down the PHY anymore. And if we kept the PHY running in whatever mode
+ * it was until the next cable is connected we'd need to tear it all down and bring it back up again
+ * anyway to detect and use the next device.
+ *
+ * Instead, we just shut down everything when a cable is disconnected and transition to
+ * DWC3_APPLE_NO_CABLE.
+ * During initial probe we don't have any information about the connected cable and can't bring up
+ * the PHY properly and thus also can't fully bring up dwc3. Instead, we just keep everything off
+ * and defer the first dwc3 probe until we get the first cable connected event. Until then we stay
+ * in DWC3_APPLE_PROBE_PENDING.
+ * Once a cable is connected we then keep track of the controller mode here by transitioning to
+ * DWC3_APPLE_HOST or DWC3_APPLE_DEVICE.
+ */
+enum dwc3_apple_state {
+ DWC3_APPLE_PROBE_PENDING, /* Before first cable connection, dwc3_core_probe not called */
+ DWC3_APPLE_NO_CABLE, /* No cable connected, dwc3 suspended after dwc3_core_exit */
+ DWC3_APPLE_HOST, /* Cable connected, dwc3 in host mode */
+ DWC3_APPLE_DEVICE, /* Cable connected, dwc3 in device mode */
+};
+
+/**
+ * struct dwc3_apple - Apple-specific DWC3 USB controller
+ * @dwc: Core DWC3 structure
+ * @dev: Pointer to the device structure
+ * @mmio_resource: Resource to be passed to dwc3_core_probe
+ * @apple_regs: Apple-specific DWC3 registers
+ * @reset: Reset control
+ * @role_sw: USB role switch
+ * @lock: Mutex for synchronizing access
+ * @state: Current state of the controller, see documentation for the enum for details
+ */
+struct dwc3_apple {
+ struct dwc3 dwc;
+
+ struct device *dev;
+ struct resource *mmio_resource;
+ void __iomem *apple_regs;
+
+ struct reset_control *reset;
+ struct usb_role_switch *role_sw;
+
+ struct mutex lock;
+
+ enum dwc3_apple_state state;
+};
+
+#define to_dwc3_apple(d) container_of((d), struct dwc3_apple, dwc)
+
+/*
+ * Apple Silicon dwc3 vendor-specific registers
+ *
+ * These registers were identified by tracing XNU's memory access patterns and correlating them with
+ * debug output over serial to determine their names. We don't exactly know what these do but
+ * without these USB3 devices sometimes don't work.
+ */
+#define APPLE_DWC3_REGS_START 0xcd00
+#define APPLE_DWC3_REGS_END 0xcdff
+
+#define APPLE_DWC3_CIO_LFPS_OFFSET 0xcd38
+#define APPLE_DWC3_CIO_LFPS_OFFSET_VALUE 0xf800f80
+
+#define APPLE_DWC3_CIO_BW_NGT_OFFSET 0xcd3c
+#define APPLE_DWC3_CIO_BW_NGT_OFFSET_VALUE 0xfc00fc0
+
+#define APPLE_DWC3_CIO_LINK_TIMER 0xcd40
+#define APPLE_DWC3_CIO_PENDING_HP_TIMER GENMASK(23, 16)
+#define APPLE_DWC3_CIO_PENDING_HP_TIMER_VALUE 0x14
+#define APPLE_DWC3_CIO_PM_LC_TIMER GENMASK(15, 8)
+#define APPLE_DWC3_CIO_PM_LC_TIMER_VALUE 0xa
+#define APPLE_DWC3_CIO_PM_ENTRY_TIMER GENMASK(7, 0)
+#define APPLE_DWC3_CIO_PM_ENTRY_TIMER_VALUE 0x10
+
+static inline void dwc3_apple_writel(struct dwc3_apple *appledwc, u32 offset, u32 value)
+{
+ writel(value, appledwc->apple_regs + offset - APPLE_DWC3_REGS_START);
+}
+
+static inline u32 dwc3_apple_readl(struct dwc3_apple *appledwc, u32 offset)
+{
+ return readl(appledwc->apple_regs + offset - APPLE_DWC3_REGS_START);
+}
+
+static inline void dwc3_apple_mask(struct dwc3_apple *appledwc, u32 offset, u32 mask, u32 value)
+{
+ u32 reg;
+
+ reg = dwc3_apple_readl(appledwc, offset);
+ reg &= ~mask;
+ reg |= value;
+ dwc3_apple_writel(appledwc, offset, reg);
+}
+
+static void dwc3_apple_setup_cio(struct dwc3_apple *appledwc)
+{
+ dwc3_apple_writel(appledwc, APPLE_DWC3_CIO_LFPS_OFFSET, APPLE_DWC3_CIO_LFPS_OFFSET_VALUE);
+ dwc3_apple_writel(appledwc, APPLE_DWC3_CIO_BW_NGT_OFFSET,
+ APPLE_DWC3_CIO_BW_NGT_OFFSET_VALUE);
+ dwc3_apple_mask(appledwc, APPLE_DWC3_CIO_LINK_TIMER, APPLE_DWC3_CIO_PENDING_HP_TIMER,
+ FIELD_PREP(APPLE_DWC3_CIO_PENDING_HP_TIMER,
+ APPLE_DWC3_CIO_PENDING_HP_TIMER_VALUE));
+ dwc3_apple_mask(appledwc, APPLE_DWC3_CIO_LINK_TIMER, APPLE_DWC3_CIO_PM_LC_TIMER,
+ FIELD_PREP(APPLE_DWC3_CIO_PM_LC_TIMER, APPLE_DWC3_CIO_PM_LC_TIMER_VALUE));
+ dwc3_apple_mask(appledwc, APPLE_DWC3_CIO_LINK_TIMER, APPLE_DWC3_CIO_PM_ENTRY_TIMER,
+ FIELD_PREP(APPLE_DWC3_CIO_PM_ENTRY_TIMER,
+ APPLE_DWC3_CIO_PM_ENTRY_TIMER_VALUE));
+}
+
+static void dwc3_apple_set_ptrcap(struct dwc3_apple *appledwc, u32 mode)
+{
+ guard(spinlock_irqsave)(&appledwc->dwc.lock);
+ dwc3_set_prtcap(&appledwc->dwc, mode, false);
+}
+
+static int dwc3_apple_core_probe(struct dwc3_apple *appledwc)
+{
+ struct dwc3_probe_data probe_data = {};
+ int ret;
+
+ lockdep_assert_held(&appledwc->lock);
+ WARN_ON_ONCE(appledwc->state != DWC3_APPLE_PROBE_PENDING);
+
+ appledwc->dwc.dev = appledwc->dev;
+ probe_data.dwc = &appledwc->dwc;
+ probe_data.res = appledwc->mmio_resource;
+ probe_data.ignore_clocks_and_resets = true;
+ probe_data.skip_core_init_mode = true;
+ probe_data.properties = DWC3_DEFAULT_PROPERTIES;
+
+ ret = dwc3_core_probe(&probe_data);
+ if (ret)
+ return ret;
+
+ appledwc->state = DWC3_APPLE_NO_CABLE;
+ return 0;
+}
+
+static int dwc3_apple_core_init(struct dwc3_apple *appledwc)
+{
+ int ret;
+
+ lockdep_assert_held(&appledwc->lock);
+
+ switch (appledwc->state) {
+ case DWC3_APPLE_PROBE_PENDING:
+ ret = dwc3_apple_core_probe(appledwc);
+ if (ret)
+ dev_err(appledwc->dev, "Failed to probe DWC3 Core, err=%d\n", ret);
+ break;
+ case DWC3_APPLE_NO_CABLE:
+ ret = dwc3_core_init(&appledwc->dwc);
+ if (ret)
+ dev_err(appledwc->dev, "Failed to initialize DWC3 Core, err=%d\n", ret);
+ break;
+ default:
+ /* Unreachable unless there's a bug in this driver */
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static void dwc3_apple_phy_set_mode(struct dwc3_apple *appledwc, enum phy_mode mode)
+{
+ lockdep_assert_held(&appledwc->lock);
+
+ /*
+ * This platform requires SUSPHY to be enabled here already in order to properly configure
+ * the PHY and switch dwc3's PIPE interface to USB3 PHY.
+ */
+ dwc3_enable_susphy(&appledwc->dwc, true);
+ phy_set_mode(appledwc->dwc.usb2_generic_phy[0], mode);
+ phy_set_mode(appledwc->dwc.usb3_generic_phy[0], mode);
+}
+
+static int dwc3_apple_init(struct dwc3_apple *appledwc, enum dwc3_apple_state state)
+{
+ int ret, ret_reset;
+
+ lockdep_assert_held(&appledwc->lock);
+
+ ret = reset_control_deassert(appledwc->reset);
+ if (ret) {
+ dev_err(appledwc->dev, "Failed to deassert reset, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = dwc3_apple_core_init(appledwc);
+ if (ret)
+ goto reset_assert;
+
+ /*
+ * Now that the core is initialized and already went through dwc3_core_soft_reset we can
+ * configure some unknown Apple-specific settings and then bring up xhci or gadget mode.
+ */
+ dwc3_apple_setup_cio(appledwc);
+
+ switch (state) {
+ case DWC3_APPLE_HOST:
+ appledwc->dwc.dr_mode = USB_DR_MODE_HOST;
+ dwc3_apple_set_ptrcap(appledwc, DWC3_GCTL_PRTCAP_HOST);
+ dwc3_apple_phy_set_mode(appledwc, PHY_MODE_USB_HOST);
+ ret = dwc3_host_init(&appledwc->dwc);
+ if (ret) {
+ dev_err(appledwc->dev, "Failed to initialize host, ret=%d\n", ret);
+ goto core_exit;
+ }
+
+ break;
+ case DWC3_APPLE_DEVICE:
+ appledwc->dwc.dr_mode = USB_DR_MODE_PERIPHERAL;
+ dwc3_apple_set_ptrcap(appledwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_apple_phy_set_mode(appledwc, PHY_MODE_USB_DEVICE);
+ ret = dwc3_gadget_init(&appledwc->dwc);
+ if (ret) {
+ dev_err(appledwc->dev, "Failed to initialize gadget, ret=%d\n", ret);
+ goto core_exit;
+ }
+ break;
+ default:
+ /* Unreachable unless there's a bug in this driver */
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ goto core_exit;
+ }
+
+ appledwc->state = state;
+ return 0;
+
+core_exit:
+ dwc3_core_exit(&appledwc->dwc);
+reset_assert:
+ ret_reset = reset_control_assert(appledwc->reset);
+ if (ret_reset)
+ dev_warn(appledwc->dev, "Failed to assert reset, err=%d\n", ret_reset);
+
+ return ret;
+}
+
+static int dwc3_apple_exit(struct dwc3_apple *appledwc)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&appledwc->lock);
+
+ switch (appledwc->state) {
+ case DWC3_APPLE_PROBE_PENDING:
+ case DWC3_APPLE_NO_CABLE:
+ /* Nothing to do if we're already off */
+ return 0;
+ case DWC3_APPLE_DEVICE:
+ dwc3_gadget_exit(&appledwc->dwc);
+ break;
+ case DWC3_APPLE_HOST:
+ dwc3_host_exit(&appledwc->dwc);
+ break;
+ }
+
+ /*
+ * This platform requires SUSPHY to be enabled in order to properly power down the PHY
+ * and switch dwc3's PIPE interface back to a dummy PHY (i.e. no USB3 support and USB2 via
+ * a different PHY connected through ULPI).
+ */
+ dwc3_enable_susphy(&appledwc->dwc, true);
+ dwc3_core_exit(&appledwc->dwc);
+ appledwc->state = DWC3_APPLE_NO_CABLE;
+
+ ret = reset_control_assert(appledwc->reset);
+ if (ret) {
+ dev_err(appledwc->dev, "Failed to assert reset, err=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dwc3_usb_role_switch_set(struct usb_role_switch *sw, enum usb_role role)
+{
+ struct dwc3_apple *appledwc = usb_role_switch_get_drvdata(sw);
+ int ret;
+
+ guard(mutex)(&appledwc->lock);
+
+ /*
+ * We need to tear all of dwc3 down and re-initialize it every time a cable is
+ * connected or disconnected or when the mode changes. See the documentation for enum
+ * dwc3_apple_state for details.
+ */
+ ret = dwc3_apple_exit(appledwc);
+ if (ret)
+ return ret;
+
+ switch (role) {
+ case USB_ROLE_NONE:
+ /* Nothing to do if no cable is connected */
+ return 0;
+ case USB_ROLE_HOST:
+ return dwc3_apple_init(appledwc, DWC3_APPLE_HOST);
+ case USB_ROLE_DEVICE:
+ return dwc3_apple_init(appledwc, DWC3_APPLE_DEVICE);
+ default:
+ dev_err(appledwc->dev, "Invalid target role: %d\n", role);
+ return -EINVAL;
+ }
+}
+
+static enum usb_role dwc3_usb_role_switch_get(struct usb_role_switch *sw)
+{
+ struct dwc3_apple *appledwc = usb_role_switch_get_drvdata(sw);
+
+ guard(mutex)(&appledwc->lock);
+
+ switch (appledwc->state) {
+ case DWC3_APPLE_HOST:
+ return USB_ROLE_HOST;
+ case DWC3_APPLE_DEVICE:
+ return USB_ROLE_DEVICE;
+ case DWC3_APPLE_NO_CABLE:
+ case DWC3_APPLE_PROBE_PENDING:
+ return USB_ROLE_NONE;
+ default:
+ /* Unreachable unless there's a bug in this driver */
+ dev_err(appledwc->dev, "Invalid internal state: %d\n", appledwc->state);
+ return USB_ROLE_NONE;
+ }
+}
+
+static int dwc3_apple_setup_role_switch(struct dwc3_apple *appledwc)
+{
+ struct usb_role_switch_desc dwc3_role_switch = { NULL };
+
+ dwc3_role_switch.fwnode = dev_fwnode(appledwc->dev);
+ dwc3_role_switch.set = dwc3_usb_role_switch_set;
+ dwc3_role_switch.get = dwc3_usb_role_switch_get;
+ dwc3_role_switch.driver_data = appledwc;
+ appledwc->role_sw = usb_role_switch_register(appledwc->dev, &dwc3_role_switch);
+ if (IS_ERR(appledwc->role_sw))
+ return PTR_ERR(appledwc->role_sw);
+
+ return 0;
+}
+
+static int dwc3_apple_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dwc3_apple *appledwc;
+ int ret;
+
+ appledwc = devm_kzalloc(&pdev->dev, sizeof(*appledwc), GFP_KERNEL);
+ if (!appledwc)
+ return -ENOMEM;
+
+ appledwc->dev = &pdev->dev;
+ mutex_init(&appledwc->lock);
+
+ appledwc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(appledwc->reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(appledwc->reset),
+ "Failed to get reset control\n");
+
+ ret = reset_control_assert(appledwc->reset);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to assert reset, err=%d\n", ret);
+ return ret;
+ }
+
+ appledwc->mmio_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dwc3-core");
+ if (!appledwc->mmio_resource) {
+ dev_err(dev, "Failed to get DWC3 MMIO\n");
+ return -EINVAL;
+ }
+
+ appledwc->apple_regs = devm_platform_ioremap_resource_byname(pdev, "dwc3-apple");
+ if (IS_ERR(appledwc->apple_regs))
+ return dev_err_probe(dev, PTR_ERR(appledwc->apple_regs),
+ "Failed to map Apple-specific MMIO\n");
+
+ /*
+ * On this platform, DWC3 can only be brought up after parts of the PHY have been
+ * initialized with knowledge of the target mode and cable orientation from typec_set_mux.
+ * Since this has not happened here we cannot set up DWC3 yet and instead defer this until
+ * the first cable is connected. See the documentation for enum dwc3_apple_state for
+ * details.
+ */
+ appledwc->state = DWC3_APPLE_PROBE_PENDING;
+ ret = dwc3_apple_setup_role_switch(appledwc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to setup role switch\n");
+
+ return 0;
+}
+
+static void dwc3_apple_remove(struct platform_device *pdev)
+{
+ struct dwc3 *dwc = platform_get_drvdata(pdev);
+ struct dwc3_apple *appledwc = to_dwc3_apple(dwc);
+
+ guard(mutex)(&appledwc->lock);
+
+ usb_role_switch_unregister(appledwc->role_sw);
+
+ /*
+ * If we're still in DWC3_APPLE_PROBE_PENDING we never got any cable connected event and
+ * dwc3_core_probe was never called and there's hence no need to call dwc3_core_remove.
+ * dwc3_apple_exit can be called unconditionally because it checks the state itself.
+ */
+ dwc3_apple_exit(appledwc);
+ if (appledwc->state != DWC3_APPLE_PROBE_PENDING)
+ dwc3_core_remove(&appledwc->dwc);
+}
+
+static const struct of_device_id dwc3_apple_of_match[] = {
+ { .compatible = "apple,t8103-dwc3" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dwc3_apple_of_match);
+
+static struct platform_driver dwc3_apple_driver = {
+ .probe = dwc3_apple_probe,
+ .remove = dwc3_apple_remove,
+ .driver = {
+ .name = "dwc3-apple",
+ .of_match_table = dwc3_apple_of_match,
+ },
+};
+
+module_platform_driver(dwc3_apple_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sven Peter <sven@kernel.org>");
+MODULE_DESCRIPTION("DesignWare DWC3 Apple Silicon Glue Driver");
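Note: dwc3-apple relies on the scoped-guard helpers from <linux/cleanup.h>: guard(mutex)(&appledwc->lock) and guard(spinlock_irqsave)(&appledwc->dwc.lock) acquire the lock and release it automatically when the enclosing scope exits, so the early returns in the role-switch callbacks need no explicit unlock. A minimal sketch of the idiom (struct and field names are hypothetical):

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    struct example_ctx {
            struct mutex lock;
            bool ready;
    };

    static int example_locked_op(struct example_ctx *ctx)
    {
            guard(mutex)(&ctx->lock);       /* dropped on every return path */

            if (!ctx->ready)
                    return -EAGAIN;
            return 0;
    }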
diff --git a/drivers/usb/dwc3/dwc3-generic-plat.c b/drivers/usb/dwc3/dwc3-generic-plat.c
index d96b20570002..e846844e0023 100644
--- a/drivers/usb/dwc3/dwc3-generic-plat.c
+++ b/drivers/usb/dwc3/dwc3-generic-plat.c
@@ -10,8 +10,16 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include "glue.h"
+#define EIC7700_HSP_BUS_FILTER_EN BIT(0)
+#define EIC7700_HSP_BUS_CLKEN_GM BIT(9)
+#define EIC7700_HSP_BUS_CLKEN_GS BIT(16)
+#define EIC7700_HSP_AXI_LP_XM_CSYSREQ BIT(0)
+#define EIC7700_HSP_AXI_LP_XS_CSYSREQ BIT(16)
+
struct dwc3_generic {
struct device *dev;
struct dwc3 dwc;
@@ -20,6 +28,11 @@ struct dwc3_generic {
struct reset_control *resets;
};
+struct dwc3_generic_config {
+ int (*init)(struct dwc3_generic *dwc3g);
+ struct dwc3_properties properties;
+};
+
#define to_dwc3_generic(d) container_of((d), struct dwc3_generic, dwc)
static void dwc3_generic_reset_control_assert(void *data)
@@ -27,8 +40,38 @@ static void dwc3_generic_reset_control_assert(void *data)
reset_control_assert(data);
}
+static int dwc3_eic7700_init(struct dwc3_generic *dwc3g)
+{
+ struct device *dev = dwc3g->dev;
+ struct regmap *regmap;
+ u32 hsp_usb_axi_lp;
+ u32 hsp_usb_bus;
+ u32 args[2];
+ u32 val;
+
+ regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "eswin,hsp-sp-csr",
+ ARRAY_SIZE(args), args);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "No hsp-sp-csr phandle specified\n");
+ return PTR_ERR(regmap);
+ }
+
+ hsp_usb_bus = args[0];
+ hsp_usb_axi_lp = args[1];
+
+ regmap_read(regmap, hsp_usb_bus, &val);
+ regmap_write(regmap, hsp_usb_bus, val | EIC7700_HSP_BUS_FILTER_EN |
+ EIC7700_HSP_BUS_CLKEN_GM | EIC7700_HSP_BUS_CLKEN_GS);
+
+ regmap_write(regmap, hsp_usb_axi_lp, EIC7700_HSP_AXI_LP_XM_CSYSREQ |
+ EIC7700_HSP_AXI_LP_XS_CSYSREQ);
+ return 0;
+}
+
static int dwc3_generic_probe(struct platform_device *pdev)
{
+ const struct dwc3_generic_config *plat_config;
struct dwc3_probe_data probe_data = {};
struct device *dev = &pdev->dev;
struct dwc3_generic *dwc3g;
@@ -75,6 +118,22 @@ static int dwc3_generic_probe(struct platform_device *pdev)
probe_data.dwc = &dwc3g->dwc;
probe_data.res = res;
probe_data.ignore_clocks_and_resets = true;
+
+ plat_config = of_device_get_match_data(dev);
+ if (!plat_config) {
+ probe_data.properties = DWC3_DEFAULT_PROPERTIES;
+ goto core_probe;
+ }
+
+ probe_data.properties = plat_config->properties;
+ if (plat_config->init) {
+ ret = plat_config->init(dwc3g);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to init platform\n");
+ }
+
+core_probe:
ret = dwc3_core_probe(&probe_data);
if (ret)
return dev_err_probe(dev, ret, "failed to register DWC3 Core\n");
@@ -85,11 +144,8 @@ static int dwc3_generic_probe(struct platform_device *pdev)
static void dwc3_generic_remove(struct platform_device *pdev)
{
struct dwc3 *dwc = platform_get_drvdata(pdev);
- struct dwc3_generic *dwc3g = to_dwc3_generic(dwc);
dwc3_core_remove(dwc);
-
- clk_bulk_disable_unprepare(dwc3g->num_clocks, dwc3g->clks);
}
static int dwc3_generic_suspend(struct device *dev)
@@ -145,8 +201,19 @@ static const struct dev_pm_ops dwc3_generic_dev_pm_ops = {
dwc3_generic_runtime_idle)
};
+static const struct dwc3_generic_config fsl_ls1028_dwc3 = {
+ .properties.gsbuscfg0_reqinfo = 0x2222,
+};
+
+static const struct dwc3_generic_config eic7700_dwc3 = {
+ .init = dwc3_eic7700_init,
+ .properties = DWC3_DEFAULT_PROPERTIES,
+};
+
static const struct of_device_id dwc3_generic_of_match[] = {
{ .compatible = "spacemit,k1-dwc3", },
+ { .compatible = "fsl,ls1028a-dwc3", &fsl_ls1028_dwc3},
+ { .compatible = "eswin,eic7700-dwc3", &eic7700_dwc3},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, dwc3_generic_of_match);
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
index bce6af82f54c..050da327f785 100644
--- a/drivers/usb/dwc3/dwc3-imx8mp.c
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -312,7 +312,6 @@ static int dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx, pm_message_t msg)
if (dwc3_imx->wakeup_pending) {
dwc3_imx->wakeup_pending = false;
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE) {
- pm_runtime_mark_last_busy(dwc->dev);
pm_runtime_put_autosuspend(dwc->dev);
} else {
/*
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 39c72cb52ce7..ce7b76d24fb2 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -322,7 +322,6 @@ static void dwc3_pci_resume_work(struct work_struct *work)
return;
}
- pm_runtime_mark_last_busy(&dwc3->dev);
pm_runtime_put_sync_autosuspend(&dwc3->dev);
}
#endif
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index ded2ca86670c..9ac75547820d 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -704,6 +704,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
probe_data.dwc = &qcom->dwc;
probe_data.res = &res;
probe_data.ignore_clocks_and_resets = true;
+ probe_data.properties = DWC3_DEFAULT_PROPERTIES;
ret = dwc3_core_probe(&probe_data);
if (ret) {
ret = dev_err_probe(dev, ret, "failed to register DWC3 Core\n");
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 1e28d6f50ed0..0a8c47876ff9 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -383,7 +383,6 @@ static int __maybe_unused dwc3_xlnx_runtime_resume(struct device *dev)
static int __maybe_unused dwc3_xlnx_runtime_idle(struct device *dev)
{
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return 0;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6f18b4840a25..3830aa2c10a9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3872,7 +3872,7 @@ static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
case DEPEVT_STREAM_NOSTREAM:
dep->flags &= ~DWC3_EP_STREAM_PRIMED;
if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM)
- queue_delayed_work(system_wq, &dep->nostream_work,
+ queue_delayed_work(system_percpu_wq, &dep->nostream_work,
msecs_to_jiffies(100));
break;
}
@@ -4810,6 +4810,7 @@ err1:
err0:
return ret;
}
+EXPORT_SYMBOL_GPL(dwc3_gadget_init);
/* -------------------------------------------------------------------------- */
@@ -4828,6 +4829,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
}
+EXPORT_SYMBOL_GPL(dwc3_gadget_exit);
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
diff --git a/drivers/usb/dwc3/glue.h b/drivers/usb/dwc3/glue.h
index 2efd00e763be..df86e14cb706 100644
--- a/drivers/usb/dwc3/glue.h
+++ b/drivers/usb/dwc3/glue.h
@@ -10,21 +10,65 @@
#include "core.h"
/**
+ * dwc3_properties: DWC3 core properties
+ * @gsbuscfg0_reqinfo: Value to be programmed in the GSBUSCFG0.REQINFO field
+ */
+struct dwc3_properties {
+ u32 gsbuscfg0_reqinfo;
+};
+
+#define DWC3_DEFAULT_PROPERTIES ((struct dwc3_properties){ \
+ .gsbuscfg0_reqinfo = DWC3_GSBUSCFG0_REQINFO_UNSPECIFIED, \
+ })
+
+/**
* dwc3_probe_data: Initialization parameters passed to dwc3_core_probe()
* @dwc: Reference to dwc3 context structure
* @res: resource for the DWC3 core mmio region
* @ignore_clocks_and_resets: clocks and resets defined for the device should
* be ignored by the DWC3 core, as they are managed by the glue
+ * @skip_core_init_mode: Skip the final initialization of the target mode, as
+ * it must be managed by the glue
+ * @properties: dwc3 software-managed properties
*/
struct dwc3_probe_data {
struct dwc3 *dwc;
struct resource *res;
bool ignore_clocks_and_resets;
+ bool skip_core_init_mode;
+ struct dwc3_properties properties;
};
+/**
+ * dwc3_core_probe - Initialize the core dwc3 driver
+ * @data: Initialization and configuration parameters for the controller
+ *
+ * Initializes the DesignWare USB3 core driver by setting up resources,
+ * registering interrupts, performing hardware setup, and preparing
+ * the controller for operation in the appropriate mode (host, gadget,
+ * or OTG). This is the main initialization function called by glue
+ * layer drivers to set up the core controller.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
int dwc3_core_probe(const struct dwc3_probe_data *data);
+
+/**
+ * dwc3_core_remove - Deinitialize and remove the core dwc3 driver
+ * @dwc: Pointer to DWC3 controller context
+ *
+ * Cleans up resources and disables the dwc3 core driver. This should be called
+ * during driver removal or when the glue layer needs to shut down the
+ * controller completely.
+ */
void dwc3_core_remove(struct dwc3 *dwc);
+/*
+ * The following callbacks are provided for glue drivers to call from their
+ * own PM callbacks in struct dev_pm_ops. Glue drivers can perform
+ * platform-specific work before or after calling these functions and delegate
+ * the core suspend/resume operations to the core driver.
+ */
int dwc3_runtime_suspend(struct dwc3 *dwc);
int dwc3_runtime_resume(struct dwc3 *dwc);
int dwc3_runtime_idle(struct dwc3 *dwc);
@@ -33,4 +77,117 @@ int dwc3_pm_resume(struct dwc3 *dwc);
void dwc3_pm_complete(struct dwc3 *dwc);
int dwc3_pm_prepare(struct dwc3 *dwc);
+
+/* All of the following functions must only be used with skip_core_init_mode */
+
+/**
+ * dwc3_core_init - Initialize DWC3 core hardware
+ * @dwc: Pointer to DWC3 controller context
+ *
+ * Configures and initializes the core hardware, usually done by dwc3_core_probe.
+ * This function is provided for platforms that use skip_core_init_mode and need
+ * to finalize the core initialization after some platform-specific setup.
+ * It must only be called when using skip_core_init_mode and before
+ * dwc3_host_init or dwc3_gadget_init.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+int dwc3_core_init(struct dwc3 *dwc);
+
+/**
+ * dwc3_core_exit - Shut down DWC3 core hardware
+ * @dwc: Pointer to DWC3 controller context
+ *
+ * Disables and cleans up the core hardware state. This is usually handled
+ * internally by dwc3 and must only be called when using skip_core_init_mode
+ * and only after dwc3_core_init. Afterwards, dwc3_core_init may be called
+ * again.
+ */
+void dwc3_core_exit(struct dwc3 *dwc);
+
+/**
+ * dwc3_host_init - Initialize host mode operation
+ * @dwc: Pointer to DWC3 controller context
+ *
+ * Initializes the controller for USB host mode operation, usually done by
+ * dwc3_core_probe or from within the dwc3 USB role switch callback.
+ * This function is provided for platforms that use skip_core_init_mode and need
+ * to finalize the host initialization after some platform-specific setup.
+ * It must not be called before dwc3_core_init or when skip_core_init_mode is
+ * not used. It must also not be called when gadget or host mode has already
+ * been initialized.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+int dwc3_host_init(struct dwc3 *dwc);
+
+/**
+ * dwc3_host_exit - Shut down host mode operation
+ * @dwc: Pointer to DWC3 controller context
+ *
+ * Disables and cleans up host mode resources, usually done by
+ * the dwc3 USB role switch callback before switching controller mode.
+ * It must only be called when skip_core_init_mode is used and only after
+ * dwc3_host_init.
+ */
+void dwc3_host_exit(struct dwc3 *dwc);
+
+/**
+ * dwc3_gadget_init - Initialize gadget mode operation
+ * @dwc: Pointer to DWC3 controller context
+ *
+ * Initializes the controller for USB gadget mode operation, usually done by
+ * dwc3_core_probe or from within the dwc3 USB role switch callback. This
+ * function is provided for platforms that use skip_core_init_mode and need to
+ * finalize the gadget initialization after some platform-specific setup.
+ * It must not be called before dwc3_core_init or when skip_core_init_mode is
+ * not used. It must also not be called when gadget or host mode has already
+ * been initialized.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+int dwc3_gadget_init(struct dwc3 *dwc);
+
+/**
+ * dwc3_gadget_exit - Shut down gadget mode operation
+ * @dwc: Pointer to DWC3 controller context
+ *
+ * Disables and cleans up gadget mode resources, usually done by
+ * the dwc3 USB role switch callback before switching controller mode.
+ * It must only be called when skip_core_init_mode is used and only after
+ * dwc3_gadget_init.
+ */
+void dwc3_gadget_exit(struct dwc3 *dwc);
+
+/**
+ * dwc3_enable_susphy - Control SUSPHY status for all USB ports
+ * @dwc: Pointer to DWC3 controller context
+ * @enable: True to enable SUSPHY, false to disable
+ *
+ * Enables or disables the USB3 PHY SUSPEND and USB2 PHY SUSPHY feature for
+ * all available ports.
+ * This is usually handled by the dwc3 core code and should only be used
+ * when skip_core_init_mode is used and the glue layer needs to manage SUSPHY
+ * settings itself, e.g., due to platform-specific requirements during mode
+ * switches.
+ */
+void dwc3_enable_susphy(struct dwc3 *dwc, bool enable);
+
+/**
+ * dwc3_set_prtcap - Set the USB controller PRTCAP mode
+ * @dwc: Pointer to DWC3 controller context
+ * @mode: Target mode, must be one of DWC3_GCTL_PRTCAP_{HOST,DEVICE,OTG}
+ * @ignore_susphy: If true, skip disabling the SUSPHY and keep the current state
+ *
+ * Updates PRTCAP of the controller and current_dr_role inside the dwc3
+ * structure. For DRD controllers, this also disables SUSPHY unless explicitly
+ * told to skip via the ignore_susphy parameter.
+ *
+ * This is usually handled by the dwc3 core code and should only be used
+ * when skip_core_init_mode is used and the glue layer needs to manage mode
+ * transitions itself due to platform-specific requirements. It must be called
+ * with the correct mode before calling dwc3_host_init or dwc3_gadget_init.
+ */
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy);
+
#endif
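Note: glue drivers that need a non-default GSBUSCFG0.REQINFO value fill probe_data.properties instead of relying on DWC3_DEFAULT_PROPERTIES; the fsl,ls1028a entry in dwc3-generic-plat.c above is the first user. A hedged sketch of a glue probe using the new field (the 0x2222 value simply mirrors that entry, and 'glue' is a placeholder):

    struct dwc3_probe_data probe_data = {
            .dwc = &glue->dwc,
            .res = res,
            .ignore_clocks_and_resets = true,
            .properties = {
                    .gsbuscfg0_reqinfo = 0x2222,
            },
    };

    ret = dwc3_core_probe(&probe_data);
    if (ret)
            return dev_err_probe(dev, ret, "failed to register DWC3 Core\n");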
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 1c513bf8002e..cf6512ed17a6 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -37,7 +37,10 @@ static void dwc3_power_off_all_roothub_ports(struct dwc3 *dwc)
/* xhci regs are not mapped yet, do it temporarily here */
if (dwc->xhci_resources[0].start) {
- xhci_regs = ioremap(dwc->xhci_resources[0].start, DWC3_XHCI_REGS_END);
+ if (dwc->xhci_resources[0].flags & IORESOURCE_MEM_NONPOSTED)
+ xhci_regs = ioremap_np(dwc->xhci_resources[0].start, DWC3_XHCI_REGS_END);
+ else
+ xhci_regs = ioremap(dwc->xhci_resources[0].start, DWC3_XHCI_REGS_END);
if (!xhci_regs) {
dev_err(dwc->dev, "Failed to ioremap xhci_regs\n");
return;
@@ -217,6 +220,7 @@ err:
platform_device_put(xhci);
return ret;
}
+EXPORT_SYMBOL_GPL(dwc3_host_init);
void dwc3_host_exit(struct dwc3 *dwc)
{
@@ -227,3 +231,4 @@ void dwc3_host_exit(struct dwc3 *dwc)
platform_device_unregister(dwc->xhci);
dwc->xhci = NULL;
}
+EXPORT_SYMBOL_GPL(dwc3_host_exit);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 47cfbe41fdff..7f8e566b1c57 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1306,9 +1306,7 @@ static void ffs_dmabuf_release(struct kref *ref)
struct dma_buf *dmabuf = attach->dmabuf;
pr_vdebug("FFS DMABUF release\n");
- dma_resv_lock(dmabuf->resv, NULL);
- dma_buf_unmap_attachment(attach, priv->sgt, priv->dir);
- dma_resv_unlock(dmabuf->resv);
+ dma_buf_unmap_attachment_unlocked(attach, priv->sgt, priv->dir);
dma_buf_detach(attach->dmabuf, attach);
dma_buf_put(dmabuf);
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 307ea563af95..3ddfd4f66f0b 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -1272,8 +1272,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
INIT_WORK(&hidg->work, get_report_workqueue_handler);
hidg->workqueue = alloc_workqueue("report_work",
- WQ_FREEZABLE |
- WQ_MEM_RECLAIM,
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
1);
if (!hidg->workqueue) {
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 20165e1582d9..46f343ba48b3 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -40,6 +40,7 @@ MODULE_LICENSE("GPL");
static DEFINE_IDA(driver_id_numbers);
#define DRIVER_DRIVER_NAME_LENGTH_MAX 32
+#define USB_RAW_IO_LENGTH_MAX KMALLOC_MAX_SIZE
#define RAW_EVENT_QUEUE_SIZE 16
@@ -667,7 +668,7 @@ static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
return ERR_PTR(-EINVAL);
if (!usb_raw_io_flags_valid(io->flags))
return ERR_PTR(-EINVAL);
- if (io->length > PAGE_SIZE)
+ if (io->length > USB_RAW_IO_LENGTH_MAX)
return ERR_PTR(-EINVAL);
if (get_from_user)
data = memdup_user(ptr + sizeof(*io), io->length);
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index a05785bdeb30..08a21bd0c2ba 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -147,6 +147,12 @@ static struct usb_gadget_strings *dev_strings[] = {
NULL,
};
+static struct usb_function *func_lb;
+static struct usb_function_instance *func_inst_lb;
+
+static struct usb_function *func_ss;
+static struct usb_function_instance *func_inst_ss;
+
/*-------------------------------------------------------------------------*/
static struct timer_list autoresume_timer;
@@ -156,6 +162,7 @@ static void zero_autoresume(struct timer_list *unused)
{
struct usb_composite_dev *cdev = autoresume_cdev;
struct usb_gadget *g = cdev->gadget;
+ int status;
/* unconfigured devices can't issue wakeups */
if (!cdev->config)
@@ -165,10 +172,18 @@ static void zero_autoresume(struct timer_list *unused)
* more significant than just a timer firing; likely
* because of some direct user request.
*/
- if (g->speed != USB_SPEED_UNKNOWN) {
- int status = usb_gadget_wakeup(g);
- INFO(cdev, "%s --> %d\n", __func__, status);
+ if (g->speed == USB_SPEED_UNKNOWN)
+ return;
+
+ if (g->speed >= USB_SPEED_SUPER) {
+ if (loopdefault)
+ status = usb_func_wakeup(func_lb);
+ else
+ status = usb_func_wakeup(func_ss);
+ } else {
+ status = usb_gadget_wakeup(g);
}
+ INFO(cdev, "%s --> %d\n", __func__, status);
}
static void zero_suspend(struct usb_composite_dev *cdev)
@@ -206,9 +221,6 @@ static struct usb_configuration loopback_driver = {
/* .iConfiguration = DYNAMIC */
};
-static struct usb_function *func_ss;
-static struct usb_function_instance *func_inst_ss;
-
static int ss_config_setup(struct usb_configuration *c,
const struct usb_ctrlrequest *ctrl)
{
@@ -248,9 +260,6 @@ module_param_named(isoc_maxburst, gzero_options.isoc_maxburst, uint,
S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(isoc_maxburst, "0 - 15 (ss only)");
-static struct usb_function *func_lb;
-static struct usb_function_instance *func_inst_lb;
-
module_param_named(qlen, gzero_options.qlen, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qlen, "depth of loopback queue");
diff --git a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
index 7e69944ef18a..9b53daf76583 100644
--- a/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
+++ b/drivers/usb/gadget/udc/cdns2/cdns2-gadget.c
@@ -2415,7 +2415,6 @@ int cdns2_gadget_resume(struct cdns2_device *pdev, bool hibernated)
void cdns2_gadget_remove(struct cdns2_device *pdev)
{
- pm_runtime_mark_last_busy(pdev->dev);
pm_runtime_put_autosuspend(pdev->dev);
usb_del_gadget(&pdev->gadget);
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 6aab45c8525c..f61f095cedab 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -27,6 +27,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sys_soc.h>
@@ -111,8 +112,7 @@ static void ehci_platform_power_off(struct platform_device *dev)
int clk;
for (clk = EHCI_MAX_CLKS - 1; clk >= 0; clk--)
- if (priv->clks[clk])
- clk_disable_unprepare(priv->clks[clk]);
+ clk_disable_unprepare(priv->clks[clk]);
}
static struct hc_driver __read_mostly ehci_platform_hc_driver;
@@ -239,9 +239,11 @@ static int ehci_platform_probe(struct platform_device *dev)
struct usb_hcd *hcd;
struct resource *res_mem;
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
+ const struct of_device_id *match;
struct ehci_platform_priv *priv;
struct ehci_hcd *ehci;
int err, irq, clk = 0;
+ bool dma_mask_64;
if (usb_disabled())
return -ENODEV;
@@ -253,8 +255,13 @@ static int ehci_platform_probe(struct platform_device *dev)
if (!pdata)
pdata = &ehci_platform_defaults;
+ dma_mask_64 = pdata->dma_mask_64;
+ match = of_match_device(dev->dev.driver->of_match_table, &dev->dev);
+ if (match && match->data)
+ dma_mask_64 = true;
+
err = dma_coerce_mask_and_coherent(&dev->dev,
- pdata->dma_mask_64 ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
+ dma_mask_64 ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
if (err) {
dev_err(&dev->dev, "Error: DMA mask configuration failed\n");
return err;
@@ -298,7 +305,9 @@ static int ehci_platform_probe(struct platform_device *dev)
if (of_device_is_compatible(dev->dev.of_node,
"aspeed,ast2500-ehci") ||
of_device_is_compatible(dev->dev.of_node,
- "aspeed,ast2600-ehci"))
+ "aspeed,ast2600-ehci") ||
+ of_device_is_compatible(dev->dev.of_node,
+ "aspeed,ast2700-ehci"))
ehci->is_aspeed = 1;
if (soc_device_match(quirk_poll_match))
@@ -445,6 +454,17 @@ static int __maybe_unused ehci_platform_suspend(struct device *dev)
if (pdata->power_suspend)
pdata->power_suspend(pdev);
+ ret = reset_control_assert(priv->rsts);
+ if (ret) {
+ if (pdata->power_on)
+ pdata->power_on(pdev);
+
+ ehci_resume(hcd, false);
+
+ if (priv->quirk_poll)
+ quirk_poll_init(priv);
+ }
+
return ret;
}
@@ -455,11 +475,18 @@ static int __maybe_unused ehci_platform_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
struct device *companion_dev;
+ int err;
+
+ err = reset_control_deassert(priv->rsts);
+ if (err)
+ return err;
if (pdata->power_on) {
- int err = pdata->power_on(pdev);
- if (err < 0)
+ err = pdata->power_on(pdev);
+ if (err < 0) {
+ reset_control_assert(priv->rsts);
return err;
+ }
}
companion_dev = usb_of_get_companion_dev(hcd->self.controller);
@@ -485,6 +512,7 @@ static const struct of_device_id vt8500_ehci_ids[] = {
{ .compatible = "wm,prizm-ehci", },
{ .compatible = "generic-ehci", },
{ .compatible = "cavium,octeon-6335-ehci", },
+ { .compatible = "aspeed,ast2700-ehci", .data = (void *)1 },
{}
};
MODULE_DEVICE_TABLE(of, vt8500_ehci_ids);
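The ehci-platform probe above now derives the DMA mask from two sources: platform data (pdata->dma_mask_64) and the OF match data, where a non-NULL .data entry (the new aspeed,ast2700-ehci line in vt8500_ehci_ids) marks a 64-bit-capable controller. A hedged sketch of that selection pulled into a helper; the helper name is made up for illustration:

/* Sketch only - mirrors the probe logic above. */
static int ehci_platform_set_dma_mask(struct platform_device *dev,
				      const struct usb_ehci_pdata *pdata)
{
	const struct of_device_id *match;
	bool dma_mask_64 = pdata->dma_mask_64;

	/* OF match data overrides: .data != NULL means 64-bit DMA capable */
	match = of_match_device(dev->dev.driver->of_match_table, &dev->dev);
	if (match && match->data)
		dma_mask_64 = true;

	return dma_coerce_mask_and_coherent(&dev->dev,
			dma_mask_64 ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
}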
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 3c5ca2d7c92e..0938c0e7a8b6 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -18,7 +18,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/platform_data/usb-davinci.h>
#include <linux/regulator/consumer.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -166,17 +165,6 @@ static int ohci_da8xx_has_oci(struct usb_hcd *hcd)
return 0;
}
-static int ohci_da8xx_has_potpgt(struct usb_hcd *hcd)
-{
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
-
- if (hub && hub->potpgt)
- return 1;
-
- return 0;
-}
-
static int ohci_da8xx_regulator_event(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -228,7 +216,6 @@ static int ohci_da8xx_register_notify(struct usb_hcd *hcd)
static int ohci_da8xx_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int result;
u32 rh_a;
@@ -266,10 +253,6 @@ static int ohci_da8xx_reset(struct usb_hcd *hcd)
rh_a &= ~RH_A_NOCP;
rh_a |= RH_A_OCPM;
}
- if (ohci_da8xx_has_potpgt(hcd)) {
- rh_a &= ~RH_A_POTPGT;
- rh_a |= hub->potpgt << 24;
- }
ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
return result;
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index f47ae12cde6a..2e4bb5cc2165 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -69,8 +69,7 @@ static void ohci_platform_power_off(struct platform_device *dev)
int clk;
for (clk = OHCI_MAX_CLKS - 1; clk >= 0; clk--)
- if (priv->clks[clk])
- clk_disable_unprepare(priv->clks[clk]);
+ clk_disable_unprepare(priv->clks[clk]);
}
static struct hc_driver __read_mostly ohci_platform_hc_driver;
@@ -271,6 +270,7 @@ static int ohci_platform_suspend(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev->platform_data;
struct platform_device *pdev = to_platform_device(dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
bool do_wakeup = device_may_wakeup(dev);
int ret;
@@ -281,6 +281,14 @@ static int ohci_platform_suspend(struct device *dev)
if (pdata->power_suspend)
pdata->power_suspend(pdev);
+ ret = reset_control_assert(priv->resets);
+ if (ret) {
+ if (pdata->power_on)
+ pdata->power_on(pdev);
+
+ ohci_resume(hcd, false);
+ }
+
return ret;
}
@@ -289,11 +297,19 @@ static int ohci_platform_resume_common(struct device *dev, bool hibernated)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev = to_platform_device(dev);
+ struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
+ int err;
+
+ err = reset_control_deassert(priv->resets);
+ if (err)
+ return err;
if (pdata->power_on) {
- int err = pdata->power_on(pdev);
- if (err < 0)
+ err = pdata->power_on(pdev);
+ if (err < 0) {
+ reset_control_assert(priv->resets);
return err;
+ }
}
ohci_resume(hcd, hibernated);
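The suspend hunks in ohci-platform.c (and the matching ehci-platform.c change earlier) share one rollback rule: the optional reset is asserted only after the controller has been suspended, and if that assert fails the earlier steps are undone so the controller is left running rather than half-suspended. A sketch of that rollback pulled out on its own; the helper name is illustrative, not from the patch:

static int ohci_platform_assert_reset(struct usb_hcd *hcd,
				      struct ohci_platform_priv *priv,
				      struct usb_ohci_pdata *pdata,
				      struct platform_device *pdev)
{
	int ret = reset_control_assert(priv->resets);

	if (ret) {
		/* undo the steps that already ran so the HC keeps working */
		if (pdata->power_on)
			pdata->power_on(pdev);
		ohci_resume(hcd, false);
	}
	return ret;
}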
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 13ee2a6144b2..4326d1f3ca76 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -445,6 +445,7 @@ struct uhci_hcd {
short load[MAX_PHASE]; /* Periodic allocations */
struct clk *clk; /* (optional) clock source */
+ struct reset_control *rsts; /* (optional) clock reset */
/* Reset host controller */
void (*reset_hc) (struct uhci_hcd *uhci);
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 62318291f566..5e02f2ceafb6 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
static int uhci_platform_init(struct usb_hcd *hcd)
{
@@ -67,6 +68,7 @@ static const struct hc_driver uhci_platform_hc_driver = {
static int uhci_hcd_platform_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ bool dma_mask_64 = false;
struct usb_hcd *hcd;
struct uhci_hcd *uhci;
struct resource *res;
@@ -80,7 +82,11 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (of_device_get_match_data(&pdev->dev))
+ dma_mask_64 = true;
+
+ ret = dma_coerce_mask_and_coherent(&pdev->dev,
+ dma_mask_64 ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
if (ret)
return ret;
@@ -113,7 +119,8 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
}
if (of_device_is_compatible(np, "aspeed,ast2400-uhci") ||
of_device_is_compatible(np, "aspeed,ast2500-uhci") ||
- of_device_is_compatible(np, "aspeed,ast2600-uhci")) {
+ of_device_is_compatible(np, "aspeed,ast2600-uhci") ||
+ of_device_is_compatible(np, "aspeed,ast2700-uhci")) {
uhci->is_aspeed = 1;
dev_info(&pdev->dev,
"Enabled Aspeed implementation workarounds\n");
@@ -132,17 +139,28 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
goto err_rmr;
}
+ uhci->rsts = devm_reset_control_array_get_optional_shared(&pdev->dev);
+ if (IS_ERR(uhci->rsts)) {
+ ret = PTR_ERR(uhci->rsts);
+ goto err_clk;
+ }
+ ret = reset_control_deassert(uhci->rsts);
+ if (ret)
+ goto err_clk;
+
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto err_clk;
+ goto err_reset;
ret = usb_add_hcd(hcd, ret, IRQF_SHARED);
if (ret)
- goto err_clk;
+ goto err_reset;
device_wakeup_enable(hcd->self.controller);
return 0;
+err_reset:
+ reset_control_assert(uhci->rsts);
err_clk:
clk_disable_unprepare(uhci->clk);
err_rmr:
@@ -156,6 +174,7 @@ static void uhci_hcd_platform_remove(struct platform_device *pdev)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+ reset_control_assert(uhci->rsts);
clk_disable_unprepare(uhci->clk);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
@@ -178,6 +197,7 @@ static void uhci_hcd_platform_shutdown(struct platform_device *op)
static const struct of_device_id platform_uhci_ids[] = {
{ .compatible = "generic-uhci", },
{ .compatible = "platform-uhci", },
+ { .compatible = "aspeed,ast2700-uhci", .data = (void *)1 },
{}
};
MODULE_DEVICE_TABLE(of, platform_uhci_ids);
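In the UHCI probe above, the optional shared reset sits between clock enable and HCD registration, so the error labels unwind in strict reverse order: err_reset re-asserts the reset before err_clk drops the clock. A compressed sketch of that acquire/release ordering, assuming the same uhci->rsts and uhci->clk fields and an irq value already obtained from platform_get_irq():

	/* Acquire order: clk -> reset -> hcd; errors unwind in reverse. */
	uhci->rsts = devm_reset_control_array_get_optional_shared(&pdev->dev);
	if (IS_ERR(uhci->rsts)) {
		ret = PTR_ERR(uhci->rsts);
		goto err_clk;			/* reset was never deasserted */
	}

	ret = reset_control_deassert(uhci->rsts);
	if (ret)
		goto err_clk;

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto err_reset;
	return 0;

err_reset:
	reset_control_assert(uhci->rsts);
err_clk:
	clk_disable_unprepare(uhci->clk);
	return ret;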
diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c
index 1c2a95fe41e5..0a94d302911a 100644
--- a/drivers/usb/host/xen-hcd.c
+++ b/drivers/usb/host/xen-hcd.c
@@ -1388,7 +1388,7 @@ static int xenhcd_get_frame(struct usb_hcd *hcd)
return 0;
}
-static struct hc_driver xenhcd_usb20_hc_driver = {
+static const struct hc_driver xenhcd_usb20_hc_driver = {
.description = "xen-hcd",
.product_desc = "Xen USB2.0 Virtual Host Controller",
.hcd_priv_size = sizeof(struct xenhcd_info),
@@ -1413,7 +1413,7 @@ static struct hc_driver xenhcd_usb20_hc_driver = {
#endif
};
-static struct hc_driver xenhcd_usb11_hc_driver = {
+static const struct hc_driver xenhcd_usb11_hc_driver = {
.description = "xen-hcd",
.product_desc = "Xen USB1.1 Virtual Host Controller",
.hcd_priv_size = sizeof(struct xenhcd_info),
diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h
index 89bc83e4f1eb..2f59b6ab1e45 100644
--- a/drivers/usb/host/xhci-caps.h
+++ b/drivers/usb/host/xhci-caps.h
@@ -1,93 +1,120 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * xHCI Host Controller Capability Registers.
+ * xHCI Specification Section 5.3, Revision 1.2.
+ */
+
+#include <linux/bits.h>
-/* hc_capbase bitmasks */
-/* bits 7:0 - how long is the Capabilities register */
-#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
-/* bits 31:16 */
+/* hc_capbase - bitmasks */
+/* bits 7:0 - Capability Registers Length */
+#define HC_LENGTH(p) ((p) & 0xff)
+/* bits 15:8 - Rsvd */
+/* bits 31:16 - Host Controller Interface Version Number */
#define HC_VERSION(p) (((p) >> 16) & 0xffff)
/* HCSPARAMS1 - hcs_params1 - bitmasks */
-/* bits 0:7, Max Device Slots */
+/* bits 7:0 - Number of Device Slots */
#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
#define HCS_SLOTS_MASK 0xff
-/* bits 8:18, Max Interrupters */
+/* bits 18:8 - Number of Interrupters, max value is 1024 */
#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
-/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
-#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
+/* bits 31:24, Max Ports - max value is 255 */
+#define HCS_MAX_PORTS(p) (((p) >> 24) & 0xff)
/* HCSPARAMS2 - hcs_params2 - bitmasks */
-/* bits 0:3, frames or uframes that SW needs to queue transactions
- * ahead of the HW to meet periodic deadlines */
-#define HCS_IST(p) (((p) >> 0) & 0xf)
-/* bits 4:7, max number of Event Ring segments */
+/*
+ * bits 3:0 - Isochronous Scheduling Threshold, frames or uframes that SW
+ * needs to queue transactions ahead of the HW to meet periodic deadlines.
+ * - Bits 2:0: Threshold value
+ * - Bit 3: Unit indicator
+ * - '1': Threshold in Frames
+ * - '0': Threshold in Microframes (uframes)
+ * Note: 1 Frame = 8 Microframes
+ * xHCI specification section 5.3.4.
+ */
+#define HCS_IST_VALUE(p) ((p) & 0x7)
+#define HCS_IST_UNIT BIT(3)
+/* bits 7:4 - Event Ring Segment Table Max, 2^(n) */
#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
-/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
-/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
-/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
-#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+/* bits 20:8 - Rsvd */
+/* bits 25:21 - Max Scratchpad Buffers (Hi), 5 Most significant bits */
+#define HCS_MAX_SP_HI(p) (((p) >> 21) & 0x1f)
+/* bit 26 - Scratchpad restore, for save/restore HW state */
+/* bits 31:27 - Max Scratchpad Buffers (Lo), 5 Least significant bits */
+#define HCS_MAX_SP_LO(p) (((p) >> 27) & 0x1f)
+#define HCS_MAX_SCRATCHPAD(p) (HCS_MAX_SP_HI(p) << 5 | HCS_MAX_SP_LO(p))
/* HCSPARAMS3 - hcs_params3 - bitmasks */
-/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+/* bits 7:0 - U1 Device Exit Latency, Max U1 to U0 latency for the roothub ports */
#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
-/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+/* bits 15:8 - Rsvd */
+/* bits 31:16 - U2 Device Exit Latency, Max U2 to U0 latency for the roothub ports */
#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
-/* HCCPARAMS - hcc_params - bitmasks */
-/* true: HC can use 64-bit address pointers */
-#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
-/* true: HC can do bandwidth negotiation */
-#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
-/* true: HC uses 64-byte Device Context structures
- * FIXME 64-byte context structures aren't supported yet.
- */
-#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
-/* true: HC has port power switches */
-#define HCC_PPC(p) ((p) & (1 << 3))
-/* true: HC has port indicators */
-#define HCS_INDICATOR(p) ((p) & (1 << 4))
-/* true: HC has Light HC Reset Capability */
-#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
-/* true: HC supports latency tolerance messaging */
-#define HCC_LTC(p) ((p) & (1 << 6))
-/* true: no secondary Stream ID Support */
-#define HCC_NSS(p) ((p) & (1 << 7))
-/* true: HC supports Stopped - Short Packet */
-#define HCC_SPC(p) ((p) & (1 << 9))
-/* true: HC has Contiguous Frame ID Capability */
-#define HCC_CFC(p) ((p) & (1 << 11))
-/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+/* HCCPARAMS1 - hcc_params - bitmasks */
+/* bit 0 - 64-bit Addressing Capability */
+#define HCC_64BIT_ADDR BIT(0)
+/* bit 1 - BW Negotiation Capability */
+#define HCC_BANDWIDTH_NEG BIT(1)
+/* bit 2 - Context Size */
+#define HCC_64BYTE_CONTEXT BIT(2)
+#define CTX_SIZE(_hcc) (_hcc & HCC_64BYTE_CONTEXT ? 64 : 32)
+/* bit 3 - Port Power Control */
+#define HCC_PPC BIT(3)
+/* bit 4 - Port Indicators */
+#define HCS_INDICATOR BIT(4)
+/* bit 5 - Light HC Reset Capability */
+#define HCC_LIGHT_RESET BIT(5)
+/* bit 6 - Latency Tolerance Messaging Capability */
+#define HCC_LTC BIT(6)
+/* bit 7 - No Secondary Stream ID Support */
+#define HCC_NSS BIT(7)
+/* bit 8 - Parse All Event Data */
+/* bit 9 - Short Packet Capability */
+#define HCC_SPC BIT(9)
+/* bit 10 - Stopped EDTLA Capability */
+/* bit 11 - Contiguous Frame ID Capability */
+#define HCC_CFC BIT(11)
+/* bits 15:12 - Max size for Primary Stream Arrays, 2^(n+1) */
#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
-/* Extended Capabilities pointer from PCI base - section 5.3.6 */
-#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
-
-#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+/* bits 31:16 - xHCI Extended Capabilities Pointer, from PCI base: 2^(n) */
+#define HCC_EXT_CAPS(p) (((p) >> 16) & 0xffff)
-/* db_off bitmask - bits 31:2 Doorbell Array Offset */
+/* DBOFF - db_off - bitmasks */
+/* bits 1:0 - Rsvd */
+/* bits 31:2 - Doorbell Array Offset */
#define DBOFF_MASK (0xfffffffc)
-/* run_regs_off bitmask - bits 0:4 reserved */
+/* RTSOFF - run_regs_off - bitmasks */
+/* bits 4:0 - Rsvd */
+/* bits 31:5 - Runtime Register Space Offset */
#define RTSOFF_MASK (~0x1f)
/* HCCPARAMS2 - hcc_params2 - bitmasks */
-/* true: HC supports U3 entry Capability */
-#define HCC2_U3C(p) ((p) & (1 << 0))
-/* true: HC supports Configure endpoint command Max exit latency too large */
-#define HCC2_CMC(p) ((p) & (1 << 1))
-/* true: HC supports Force Save context Capability */
-#define HCC2_FSC(p) ((p) & (1 << 2))
-/* true: HC supports Compliance Transition Capability */
-#define HCC2_CTC(p) ((p) & (1 << 3))
-/* true: HC support Large ESIT payload Capability > 48k */
-#define HCC2_LEC(p) ((p) & (1 << 4))
-/* true: HC support Configuration Information Capability */
-#define HCC2_CIC(p) ((p) & (1 << 5))
-/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
-#define HCC2_ETC(p) ((p) & (1 << 6))
-/* true: HC support Extended TBC TRB Status Capability */
-#define HCC2_ETC_TSC(p) ((p) & (1 << 7))
-/* true: HC support Get/Set Extended Property Capability */
-#define HCC2_GSC(p) ((p) & (1 << 8))
-/* true: HC support Virtualization Based Trusted I/O Capability */
-#define HCC2_VTC(p) ((p) & (1 << 9))
-/* true: HC support Double BW on a eUSB2 HS ISOC EP */
-#define HCC2_EUSB2_DIC(p) ((p) & (1 << 11))
+/* bit 0 - U3 Entry Capability */
+#define HCC2_U3C BIT(0)
+/* bit 1 - Configure Endpoint Command Max Exit Latency Too Large Capability */
+#define HCC2_CMC BIT(1)
+/* bit 2 - Force Save Context Capability */
+#define HCC2_FSC BIT(2)
+/* bit 3 - Compliance Transition Capability, false: compliance is enabled by default */
+#define HCC2_CTC BIT(3)
+/* bit 4 - Large ESIT Payload Capability, true: HC supports ESIT payload > 48k */
+#define HCC2_LEC BIT(4)
+/* bit 5 - Configuration Information Capability */
+#define HCC2_CIC BIT(5)
+/* bit 6 - Extended TBC Capability, true: Isoc burst count > 65535 */
+#define HCC2_ETC BIT(6)
+/* bit 7 - Extended TBC TRB Status Capability */
+#define HCC2_ETC_TSC BIT(7)
+/* bit 8 - Get/Set Extended Property Capability */
+#define HCC2_GSC BIT(8)
+/* bit 9 - Virtualization Based Trusted I/O Capability */
+#define HCC2_VTC BIT(9)
+/* bit 10 - Rsvd */
+/* bit 11 - HC supports Double BW on an eUSB2 HS ISOC EP */
+#define HCC2_EUSB2_DIC BIT(11)
+/* bit 12 - HC supports eUSB2V2 capability */
+#define HCC2_E2V2C BIT(12)
+/* bits 31:13 - Rsvd */
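The rewritten capability header above switches the HCCPARAMS flags to plain BIT() masks (tested with & at the call sites, as the later xhci hunks show) and names the individual HCSPARAMS2 fields. A small standalone sketch of how a raw HCSPARAMS2 word decodes under this layout; the register value is made up purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the HCSPARAMS2 macros above. */
#define HCS_IST_VALUE(p)	((p) & 0x7)
#define HCS_IST_UNIT		(1u << 3)	/* set: frames, clear: uframes */
#define HCS_ERST_MAX(p)		(((p) >> 4) & 0xf)
#define HCS_MAX_SP_HI(p)	(((p) >> 21) & 0x1f)
#define HCS_MAX_SP_LO(p)	(((p) >> 27) & 0x1f)
#define HCS_MAX_SCRATCHPAD(p)	(HCS_MAX_SP_HI(p) << 5 | HCS_MAX_SP_LO(p))

int main(void)
{
	uint32_t hcs2 = 0x20000041;	/* made-up register value */

	/* IST: bits 2:0 give the count, bit 3 selects frames vs. uframes */
	printf("IST: %u %s\n", HCS_IST_VALUE(hcs2),
	       (hcs2 & HCS_IST_UNIT) ? "frames" : "uframes");
	/* ERST Max is an exponent: the event ring may use up to 2^n segments */
	printf("ERST segments: %u\n", 1u << HCS_ERST_MAX(hcs2));
	/* scratchpad count is split across Hi (25:21) and Lo (31:27) */
	printf("scratchpad buffers: %u\n", HCS_MAX_SCRATCHPAD(hcs2));
	return 0;	/* prints: IST 1 uframes, 16 segments, 4 buffers */
}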
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 63edf2d8f245..9da4f3b452cb 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -374,7 +374,7 @@ int dbc_ep_queue(struct dbc_request *req)
ret = dbc_ep_do_queue(req);
spin_unlock_irqrestore(&dbc->lock, flags);
- mod_delayed_work(system_wq, &dbc->event_work, 0);
+ mod_delayed_work(system_percpu_wq, &dbc->event_work, 0);
trace_xhci_dbc_queue_request(req);
@@ -677,7 +677,7 @@ static int xhci_dbc_start(struct xhci_dbc *dbc)
return ret;
}
- return mod_delayed_work(system_wq, &dbc->event_work,
+ return mod_delayed_work(system_percpu_wq, &dbc->event_work,
msecs_to_jiffies(dbc->poll_interval));
}
@@ -892,7 +892,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
dev_info(dbc->dev, "DbC configured\n");
portsc = readl(&dbc->regs->portsc);
writel(portsc, &dbc->regs->portsc);
- return EVT_GSER;
+ ret = EVT_GSER;
+ break;
}
return EVT_DONE;
@@ -954,7 +955,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
break;
case TRB_TYPE(TRB_TRANSFER):
dbc_handle_xfer_event(dbc, evt);
- ret = EVT_XFER_DONE;
+ if (ret != EVT_GSER)
+ ret = EVT_XFER_DONE;
break;
default:
break;
@@ -1021,7 +1023,7 @@ static void xhci_dbc_handle_events(struct work_struct *work)
return;
}
- mod_delayed_work(system_wq, &dbc->event_work,
+ mod_delayed_work(system_percpu_wq, &dbc->event_work,
msecs_to_jiffies(poll_interval));
}
@@ -1272,7 +1274,7 @@ static ssize_t dbc_poll_interval_ms_store(struct device *dev,
dbc->poll_interval = value;
- mod_delayed_work(system_wq, &dbc->event_work, 0);
+ mod_delayed_work(system_percpu_wq, &dbc->event_work, 0);
return size;
}
@@ -1390,8 +1392,15 @@ int xhci_dbc_suspend(struct xhci_hcd *xhci)
if (!dbc)
return 0;
- if (dbc->state == DS_CONFIGURED)
+ switch (dbc->state) {
+ case DS_ENABLED:
+ case DS_CONNECTED:
+ case DS_CONFIGURED:
dbc->resume_required = 1;
+ break;
+ default:
+ break;
+ }
xhci_dbc_stop(dbc);
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index c6d44977193f..c1eb1036ede9 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -329,7 +329,7 @@ static int xhci_portsc_show(struct seq_file *s, void *unused)
u32 portsc;
char str[XHCI_MSG_MAX];
- portsc = readl(port->addr);
+ portsc = xhci_portsc_readl(port);
seq_printf(s, "%s\n", xhci_decode_portsc(str, portsc));
return 0;
@@ -355,11 +355,11 @@ static ssize_t xhci_port_write(struct file *file, const char __user *ubuf,
if (!strncmp(buf, "compliance", 10)) {
/* If CTC is clear, compliance is enabled by default */
- if (!HCC2_CTC(xhci->hcc_params2))
+ if (!(xhci->hcc_params2 & HCC2_CTC))
return count;
spin_lock_irqsave(&xhci->lock, flags);
/* compliance mode can only be enabled on ports in RxDetect */
- portsc = readl(port->addr);
+ portsc = xhci_portsc_readl(port);
if ((portsc & PORT_PLS_MASK) != XDEV_RXDETECT) {
spin_unlock_irqrestore(&xhci->lock, flags);
return -EPERM;
@@ -367,7 +367,7 @@ static ssize_t xhci_port_write(struct file *file, const char __user *ubuf,
portsc = xhci_port_state_to_neutral(portsc);
portsc &= ~PORT_PLS_MASK;
portsc |= PORT_LINK_STROBE | XDEV_COMP_MODE;
- writel(portsc, port->addr);
+ xhci_portsc_writel(port, portsc);
spin_unlock_irqrestore(&xhci->lock, flags);
} else {
return -EINVAL;
@@ -383,6 +383,39 @@ static const struct file_operations port_fops = {
.release = single_release,
};
+static int xhci_portli_show(struct seq_file *s, void *unused)
+{
+ struct xhci_port *port = s->private;
+ struct xhci_hcd *xhci = hcd_to_xhci(port->rhub->hcd);
+ u32 portli;
+
+ portli = readl(&port->port_reg->portli);
+
+ /* PORTLI fields are valid if port is a USB3 or eUSB2V2 port */
+ if (port->rhub == &xhci->usb3_rhub)
+ seq_printf(s, "0x%08x LEC=%u RLC=%u TLC=%u\n", portli,
+ PORT_LEC(portli), PORT_RX_LANES(portli), PORT_TX_LANES(portli));
+ else if (xhci->hcc_params2 & HCC2_E2V2C)
+ seq_printf(s, "0x%08x RDR=%u TDR=%u\n", portli,
+ PORTLI_RDR(portli), PORTLI_TDR(portli));
+ else
+ seq_printf(s, "0x%08x RsvdP\n", portli);
+
+ return 0;
+}
+
+static int xhci_portli_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xhci_portli_show, inode->i_private);
+}
+
+static const struct file_operations portli_fops = {
+ .open = xhci_portli_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
struct xhci_file_map *files,
size_t nentries, void *data,
@@ -613,28 +646,24 @@ void xhci_debugfs_remove_slot(struct xhci_hcd *xhci, int slot_id)
static void xhci_debugfs_create_ports(struct xhci_hcd *xhci,
struct dentry *parent)
{
- unsigned int num_ports;
char port_name[8];
struct xhci_port *port;
struct dentry *dir;
- num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
-
parent = debugfs_create_dir("ports", parent);
- while (num_ports--) {
- scnprintf(port_name, sizeof(port_name), "port%02d",
- num_ports + 1);
+ for (int i = 0; i < xhci->max_ports; i++) {
+ scnprintf(port_name, sizeof(port_name), "port%02d", i + 1);
dir = debugfs_create_dir(port_name, parent);
- port = &xhci->hw_ports[num_ports];
+ port = &xhci->hw_ports[i];
debugfs_create_file("portsc", 0644, dir, port, &port_fops);
+ debugfs_create_file("portli", 0444, dir, port, &portli_fops);
}
}
static int xhci_port_bw_show(struct xhci_hcd *xhci, u8 dev_speed,
struct seq_file *s)
{
- unsigned int num_ports;
unsigned int i;
int ret;
struct xhci_container_ctx *ctx;
@@ -645,8 +674,6 @@ static int xhci_port_bw_show(struct xhci_hcd *xhci, u8 dev_speed,
if (ret < 0)
return ret;
- num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
-
ctx = xhci_alloc_port_bw_ctx(xhci, 0);
if (!ctx) {
pm_runtime_put_sync(dev);
@@ -661,7 +688,7 @@ static int xhci_port_bw_show(struct xhci_hcd *xhci, u8 dev_speed,
/* print all roothub ports available bandwidth
* refer to xhci rev1_2 protocol 6.2.6 , byte 0 is reserved
*/
- for (i = 1; i < num_ports+1; i++)
+ for (i = 1; i <= xhci->max_ports; i++)
seq_printf(s, "port[%d] available bw: %d%%.\n", i,
ctx->bytes[i]);
err_out:
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index b3a59ce1b3f4..04cc3d681495 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -110,7 +110,7 @@ static int xhci_create_usb3x_bos_desc(struct xhci_hcd *xhci, char *buf,
ss_cap->bU2DevExitLat = 0; /* set later */
reg = readl(&xhci->cap_regs->hcc_params);
- if (HCC_LTC(reg))
+ if (reg & HCC_LTC)
ss_cap->bmAttributes |= USB_LTM_SUPPORT;
if ((xhci->quirks & XHCI_LPM_SUPPORT)) {
@@ -263,7 +263,7 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
desc->bNbrPorts = ports;
temp = 0;
/* Bits 1:0 - support per-port power switching, or power always on */
- if (HCC_PPC(xhci->hcc_params))
+ if (xhci->hcc_params & HCC_PPC)
temp |= HUB_CHAR_INDV_PORT_LPSM;
else
temp |= HUB_CHAR_NO_LPSM;
@@ -299,7 +299,7 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
*/
memset(port_removable, 0, sizeof(port_removable));
for (i = 0; i < ports; i++) {
- portsc = readl(rhub->ports[i]->addr);
+ portsc = xhci_portsc_readl(rhub->ports[i]);
/* If a device is removable, PORTSC reports a 0, same as in the
* hub descriptor DeviceRemovable bits.
*/
@@ -356,7 +356,7 @@ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
port_removable = 0;
/* bit 0 is reserved, bit 1 is for port 1, etc. */
for (i = 0; i < ports; i++) {
- portsc = readl(rhub->ports[i]->addr);
+ portsc = xhci_portsc_readl(rhub->ports[i]);
if (portsc & PORT_DEV_REMOVE)
port_removable |= 1 << (i + 1);
}
@@ -566,19 +566,19 @@ static void xhci_disable_port(struct xhci_hcd *xhci, struct xhci_port *port)
return;
}
- portsc = readl(port->addr);
+ portsc = xhci_portsc_readl(port);
portsc = xhci_port_state_to_neutral(portsc);
/* Write 1 to disable the port */
- writel(portsc | PORT_PE, port->addr);
+ xhci_portsc_writel(port, portsc | PORT_PE);
- portsc = readl(port->addr);
+ portsc = xhci_portsc_readl(port);
xhci_dbg(xhci, "disable port %d-%d, portsc: 0x%x\n",
hcd->self.busnum, port->hcd_portnum + 1, portsc);
}
static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
- u16 wIndex, __le32 __iomem *addr, u32 port_status)
+ u16 wIndex, struct xhci_port *port, u32 port_status)
{
char *port_change_bit;
u32 status;
@@ -621,8 +621,8 @@ static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
return;
}
/* Change bits are all write 1 to clear */
- writel(port_status | status, addr);
- port_status = readl(addr);
+ xhci_portsc_writel(port, port_status | status);
+ port_status = xhci_portsc_readl(port);
xhci_dbg(xhci, "clear port%d %s change, portsc: 0x%x\n",
wIndex + 1, port_change_bit, port_status);
@@ -650,7 +650,7 @@ static void xhci_set_port_power(struct xhci_hcd *xhci, struct xhci_port *port,
u32 temp;
hcd = port->rhub->hcd;
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
xhci_dbg(xhci, "set port power %d-%d %s, portsc: 0x%x\n",
hcd->self.busnum, port->hcd_portnum + 1, on ? "ON" : "OFF", temp);
@@ -659,11 +659,11 @@ static void xhci_set_port_power(struct xhci_hcd *xhci, struct xhci_port *port,
if (on) {
/* Power on */
- writel(temp | PORT_POWER, port->addr);
- readl(port->addr);
+ xhci_portsc_writel(port, temp | PORT_POWER);
+ xhci_portsc_readl(port);
} else {
/* Power off */
- writel(temp & ~PORT_POWER, port->addr);
+ xhci_portsc_writel(port, temp & ~PORT_POWER);
}
spin_unlock_irqrestore(&xhci->lock, *flags);
@@ -683,9 +683,9 @@ static void xhci_port_set_test_mode(struct xhci_hcd *xhci,
/* xhci only supports test mode for usb2 ports */
port = xhci->usb2_rhub.ports[wIndex];
- temp = readl(port->addr + PORTPMSC);
+ temp = readl(&port->port_reg->portpmsc);
temp |= test_mode << PORT_TEST_MODE_SHIFT;
- writel(temp, port->addr + PORTPMSC);
+ writel(temp, &port->port_reg->portpmsc);
xhci->test_mode = test_mode;
if (test_mode == USB_TEST_FORCE_ENABLE)
xhci_start(xhci);
@@ -700,7 +700,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
/* Disable all Device Slots */
xhci_dbg(xhci, "Disable all slots\n");
spin_unlock_irqrestore(&xhci->lock, *flags);
- for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ for (i = 1; i <= xhci->max_slots; i++) {
if (!xhci->devs[i])
continue;
@@ -801,11 +801,11 @@ void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
u32 temp;
u32 portsc;
- portsc = readl(port->addr);
+ portsc = xhci_portsc_readl(port);
temp = xhci_port_state_to_neutral(portsc);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | link_state;
- writel(temp, port->addr);
+ xhci_portsc_writel(port, temp);
xhci_dbg(xhci, "Set port %d-%d link state, portsc: 0x%x, write 0x%x",
port->rhub->hcd->self.busnum, port->hcd_portnum + 1,
@@ -817,7 +817,7 @@ static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
{
u32 temp;
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
temp = xhci_port_state_to_neutral(temp);
if (wake_mask & USB_PORT_FEAT_REMOTE_WAKE_CONNECT)
@@ -835,7 +835,7 @@ static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
else
temp &= ~PORT_WKOC_E;
- writel(temp, port->addr);
+ xhci_portsc_writel(port, temp);
}
/* Test and clear port RWC bit */
@@ -844,11 +844,11 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port,
{
u32 temp;
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
if (temp & port_bit) {
temp = xhci_port_state_to_neutral(temp);
temp |= port_bit;
- writel(temp, port->addr);
+ xhci_portsc_writel(port, temp);
}
}
@@ -1002,7 +1002,7 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
}
xhci_ring_device(xhci, port->slot_id);
} else {
- int port_status = readl(port->addr);
+ int port_status = xhci_portsc_readl(port);
xhci_warn(xhci, "Port resume timed out, port %d-%d: 0x%x\n",
hcd->self.busnum, wIndex + 1, port_status);
@@ -1263,7 +1263,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
wIndex--;
port = ports[portnum1 - 1];
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1288,7 +1288,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
retval = -EINVAL;
break;
}
- port_li = readl(port->addr + PORTLI);
+ port_li = readl(&port->port_reg->portli);
status = xhci_get_ext_port_status(temp, port_li);
put_unaligned_le32(status, &buf[4]);
}
@@ -1309,7 +1309,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
port = ports[portnum1 - 1];
wIndex--;
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1319,7 +1319,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* FIXME: What new port features do we need to support? */
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
if ((temp & PORT_PLS_MASK) != XDEV_U0) {
/* Resume the port to U0 first */
xhci_set_link_state(xhci, port, XDEV_U0);
@@ -1331,7 +1331,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* a port unless the port reports that it is in the
* enabled (PED = ‘1’,PLS < ‘3’) state.
*/
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
xhci_warn(xhci, "USB core suspending port %d-%d not in U0/U1/U2\n",
@@ -1354,11 +1354,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
msleep(10); /* wait device to enter */
spin_lock_irqsave(&xhci->lock, flags);
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
bus_state->suspended_ports |= 1 << wIndex;
break;
case USB_PORT_FEAT_LINK_STATE:
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
/* Disable port */
if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
xhci_dbg(xhci, "Disable port %d-%d\n",
@@ -1371,8 +1371,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp |= PORT_CSC | PORT_PEC | PORT_WRC |
PORT_OCC | PORT_RC | PORT_PLC |
PORT_CEC;
- writel(temp | PORT_PE, port->addr);
- temp = readl(port->addr);
+ xhci_portsc_writel(port, temp | PORT_PE);
+ temp = xhci_portsc_readl(port);
break;
}
@@ -1381,7 +1381,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_dbg(xhci, "Enable port %d-%d\n",
hcd->self.busnum, portnum1);
xhci_set_link_state(xhci, port, link_state);
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
break;
}
@@ -1400,7 +1400,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* automatically entered as on 1.0 and prior.
*/
if (link_state == USB_SS_PORT_LS_COMP_MOD) {
- if (!HCC2_CTC(xhci->hcc_params2)) {
+ if (!(xhci->hcc_params2 & HCC2_CTC)) {
xhci_dbg(xhci, "CTC flag is 0, port already supports entering compliance mode\n");
break;
}
@@ -1414,7 +1414,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
hcd->self.busnum, portnum1);
xhci_set_link_state(xhci, port, link_state);
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
break;
}
/* Port must be enabled */
@@ -1462,7 +1462,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n",
hcd->self.busnum, portnum1);
spin_lock_irqsave(&xhci->lock, flags);
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
break;
}
@@ -1480,12 +1480,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
spin_unlock_irqrestore(&xhci->lock, flags);
while (retries--) {
usleep_range(4000, 8000);
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
if ((temp & PORT_PLS_MASK) == XDEV_U3)
break;
}
spin_lock_irqsave(&xhci->lock, flags);
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
bus_state->suspended_ports |= 1 << wIndex;
}
break;
@@ -1500,38 +1500,38 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_RESET:
temp = (temp | PORT_RESET);
- writel(temp, port->addr);
+ xhci_portsc_writel(port, temp);
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
xhci_dbg(xhci, "set port reset, actual port %d-%d status = 0x%x\n",
hcd->self.busnum, portnum1, temp);
break;
case USB_PORT_FEAT_REMOTE_WAKE_MASK:
xhci_set_remote_wake_mask(xhci, port, wake_mask);
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
xhci_dbg(xhci, "set port remote wake mask, actual port %d-%d status = 0x%x\n",
hcd->self.busnum, portnum1, temp);
break;
case USB_PORT_FEAT_BH_PORT_RESET:
temp |= PORT_WR;
- writel(temp, port->addr);
- temp = readl(port->addr);
+ xhci_portsc_writel(port, temp);
+ temp = xhci_portsc_readl(port);
break;
case USB_PORT_FEAT_U1_TIMEOUT:
if (hcd->speed < HCD_USB3)
goto error;
- temp = readl(port->addr + PORTPMSC);
+ temp = readl(&port->port_reg->portpmsc);
temp &= ~PORT_U1_TIMEOUT_MASK;
temp |= PORT_U1_TIMEOUT(timeout);
- writel(temp, port->addr + PORTPMSC);
+ writel(temp, &port->port_reg->portpmsc);
break;
case USB_PORT_FEAT_U2_TIMEOUT:
if (hcd->speed < HCD_USB3)
goto error;
- temp = readl(port->addr + PORTPMSC);
+ temp = readl(&port->port_reg->portpmsc);
temp &= ~PORT_U2_TIMEOUT_MASK;
temp |= PORT_U2_TIMEOUT(timeout);
- writel(temp, port->addr + PORTPMSC);
+ writel(temp, &port->port_reg->portpmsc);
break;
case USB_PORT_FEAT_TEST:
/* 4.19.6 Port Test Modes (USB2 Test Mode) */
@@ -1547,7 +1547,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
/* unblock any posted writes */
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
break;
case ClearPortFeature:
if (!portnum1 || portnum1 > max_ports)
@@ -1556,7 +1556,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
port = ports[portnum1 - 1];
wIndex--;
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1566,7 +1566,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = readl(port->addr);
+ temp = xhci_portsc_readl(port);
xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
xhci_dbg(xhci, "PORTSC %04x\n", temp);
if (temp & PORT_RESET)
@@ -1603,8 +1603,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_C_ENABLE:
case USB_PORT_FEAT_C_PORT_LINK_STATE:
case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
- xhci_clear_port_change_bit(xhci, wValue, wIndex,
- port->addr, temp);
+ xhci_clear_port_change_bit(xhci, wValue, wIndex, port, temp);
break;
case USB_PORT_FEAT_ENABLE:
xhci_disable_port(xhci, port);
@@ -1671,7 +1670,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
* SS devices are only visible to roothub after link training completes.
* Keep polling roothubs for a grace period after xHC start
*/
- if (xhci->run_graceperiod) {
+ if (hcd->speed >= HCD_USB3 && xhci->run_graceperiod) {
if (time_before(jiffies, xhci->run_graceperiod))
status = 1;
else
@@ -1682,7 +1681,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
/* For each port, did anything change? If so, set that bit in buf. */
for (i = 0; i < max_ports; i++) {
- temp = readl(ports[i]->addr);
+ temp = xhci_portsc_readl(ports[i]);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1751,7 +1750,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
u32 t1, t2;
int retries = 10;
retry:
- t1 = readl(ports[port_index]->addr);
+ t1 = xhci_portsc_readl(ports[port_index]);
t2 = xhci_port_state_to_neutral(t1);
portsc_buf[port_index] = 0;
@@ -1829,7 +1828,7 @@ retry:
spin_lock_irqsave(&xhci->lock, flags);
}
}
- writel(portsc_buf[port_index], ports[port_index]->addr);
+ xhci_portsc_writel(ports[port_index], portsc_buf[port_index]);
}
hcd->state = HC_STATE_SUSPENDED;
bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1850,7 +1849,7 @@ static bool xhci_port_missing_cas_quirk(struct xhci_port *port)
{
u32 portsc;
- portsc = readl(port->addr);
+ portsc = xhci_portsc_readl(port);
/* if any of these are set we are not stuck */
if (portsc & (PORT_CONNECT | PORT_CAS))
@@ -1863,9 +1862,9 @@ static bool xhci_port_missing_cas_quirk(struct xhci_port *port)
/* clear wakeup/change bits, and do a warm port reset */
portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
portsc |= PORT_WR;
- writel(portsc, port->addr);
+ xhci_portsc_writel(port, portsc);
/* flush write */
- readl(port->addr);
+ xhci_portsc_readl(port);
return true;
}
@@ -1912,7 +1911,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
}
port_index = max_ports;
while (port_index--) {
- portsc = readl(ports[port_index]->addr);
+ portsc = xhci_portsc_readl(ports[port_index]);
/* warm reset CAS limited ports stuck in polling/compliance */
if ((xhci->quirks & XHCI_MISSING_CAS) &&
@@ -1942,7 +1941,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
}
/* disable wake for all ports, write new link state if needed */
portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
- writel(portsc, ports[port_index]->addr);
+ xhci_portsc_writel(ports[port_index], portsc);
}
/* USB2 specific resume signaling delay and U0 link state transition */
@@ -1963,7 +1962,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
/* poll for U0 link state complete, both USB2 and USB3 */
for_each_set_bit(port_index, &bus_state->bus_suspended, BITS_PER_LONG) {
- sret = xhci_handshake(ports[port_index]->addr, PORT_PLC,
+ sret = xhci_handshake(&ports[port_index]->port_reg->portsc, PORT_PLC,
PORT_PLC, 10 * 1000);
if (sret) {
xhci_warn(xhci, "port %d-%d resume PLC timeout\n",
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6e5b6057de79..c708bdd69f16 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -463,7 +463,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
return NULL;
ctx->type = type;
- ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
+ ctx->size = xhci->hcc_params & HCC_64BYTE_CONTEXT ? 2048 : 1024;
if (type == XHCI_CTX_TYPE_INPUT)
ctx->size += CTX_SIZE(xhci->hcc_params);
@@ -951,7 +951,7 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i
/* is this a hub device that added a tt_info to the tts list */
if (tt_info->slot_id == slot_id) {
/* are any devices using this tt_info? */
- for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ for (i = 1; i < xhci->max_slots; i++) {
vdev = xhci->devs[i];
if (vdev && (vdev->tt_info == tt_info))
xhci_free_virt_devices_depth_first(
@@ -1344,7 +1344,7 @@ static u32 xhci_get_endpoint_mult(struct xhci_hcd *xhci,
bool lec;
/* xHCI 1.1 with LEC set does not use mult field, except intel eUSB2 */
- lec = xhci->hci_version > 0x100 && HCC2_LEC(xhci->hcc_params2);
+ lec = xhci->hci_version > 0x100 && (xhci->hcc_params2 & HCC2_LEC);
/* eUSB2 double isoc bw devices are the only USB2 devices using mult */
if (usb_endpoint_is_hs_isoc_double(udev, ep) &&
@@ -1433,8 +1433,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ring_type = usb_endpoint_type(&ep->desc);
/* Ensure host supports double isoc bandwidth for eUSB2 devices */
- if (usb_endpoint_is_hs_isoc_double(udev, ep) &&
- !HCC2_EUSB2_DIC(xhci->hcc_params2)) {
+ if (usb_endpoint_is_hs_isoc_double(udev, ep) && !(xhci->hcc_params2 & HCC2_EUSB2_DIC)) {
dev_dbg(&udev->dev, "Double Isoc Bandwidth not supported by xhci\n");
return -EINVAL;
}
@@ -1899,7 +1898,7 @@ EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter);
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- int i, j, num_ports;
+ int i, j;
cancel_delayed_work_sync(&xhci->cmd_timer);
@@ -1918,8 +1917,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
xhci_cleanup_command_queue(xhci);
- num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
- for (i = 0; i < num_ports && xhci->rh_bw; i++) {
+ for (i = 0; i < xhci->max_ports && xhci->rh_bw; i++) {
struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
struct list_head *ep = &bwt->interval_bw[j].endpoints;
@@ -1928,7 +1926,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
}
- for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
+ for (i = xhci->max_slots; i > 0; i--)
xhci_free_virt_devices_depth_first(xhci, i);
dma_pool_destroy(xhci->segment_pool);
@@ -1964,7 +1962,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (!xhci->rh_bw)
goto no_bw;
- for (i = 0; i < num_ports; i++) {
+ for (i = 0; i < xhci->max_ports; i++) {
struct xhci_tt_bw_info *tt, *n;
list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
list_del(&tt->tt_list);
@@ -2165,7 +2163,7 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
if (!rhub->ports)
return;
- for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
+ for (i = 0; i < xhci->max_ports; i++) {
if (xhci->hw_ports[i].rhub != rhub ||
xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
continue;
@@ -2188,32 +2186,28 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
void __iomem *base;
u32 offset;
- unsigned int num_ports;
int i, j;
int cap_count = 0;
u32 cap_start;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
- num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
- xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
- flags, dev_to_node(dev));
+ xhci->hw_ports = kcalloc_node(xhci->max_ports, sizeof(*xhci->hw_ports),
+ flags, dev_to_node(dev));
if (!xhci->hw_ports)
return -ENOMEM;
- for (i = 0; i < num_ports; i++) {
- xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
- NUM_PORT_REGS * i;
+ for (i = 0; i < xhci->max_ports; i++) {
+ xhci->hw_ports[i].port_reg = &xhci->op_regs->port_regs[i];
xhci->hw_ports[i].hw_portnum = i;
init_completion(&xhci->hw_ports[i].rexit_done);
init_completion(&xhci->hw_ports[i].u3exit_done);
}
- xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
- dev_to_node(dev));
+ xhci->rh_bw = kcalloc_node(xhci->max_ports, sizeof(*xhci->rh_bw), flags, dev_to_node(dev));
if (!xhci->rh_bw)
return -ENOMEM;
- for (i = 0; i < num_ports; i++) {
+ for (i = 0; i < xhci->max_ports; i++) {
struct xhci_interval_bw_table *bw_table;
INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
@@ -2245,9 +2239,8 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
offset = cap_start;
while (offset) {
- xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
- if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
- num_ports)
+ xhci_add_in_port(xhci, xhci->max_ports, base + offset, cap_count);
+ if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports == xhci->max_ports)
break;
offset = xhci_find_next_ext_cap(base, offset,
XHCI_EXT_CAPS_PROTOCOL);
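The xhci-mem.c hunks above replace repeated HCS_MAX_PORTS(xhci->hcs_params1) / HCS_MAX_SLOTS(...) decoding with the cached xhci->max_ports and xhci->max_slots fields. Where those fields are filled in is not part of this excerpt; the obvious one-time initialization would look roughly like the following, stated as an assumption only:

	/* Assumed caching at init, when the capability registers are read. */
	xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->max_slots = HCS_MAX_SLOTS(xhci->hcs_params1);
	xhci->max_ports = HCS_MAX_PORTS(xhci->hcs_params1);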
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 208558cf822d..06043c7c3100 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -670,7 +670,6 @@ static int xhci_mtk_probe(struct platform_device *pdev)
}
device_enable_async_suspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
pm_runtime_forbid(dev);
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index f5e2bd66bb1b..2274f5995171 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -21,7 +21,7 @@
/* support at most 64 ep, use 32 size hash table */
#define SCH_EP_HASH_BITS 5
-/**
+/*
* To simplify the scheduler algorithm, set an upper limit for ESIT,
* if a synchronous ep's ESIT is larger than @XHCI_MTK_MAX_ESIT,
* round down to the limit value, that means allocating more
@@ -34,6 +34,7 @@
#define XHCI_MTK_FRAMES_CNT (XHCI_MTK_MAX_ESIT / UFRAMES_PER_FRAME)
/**
+ * struct mu3h_sch_tt - TT scheduling data
* @fs_bus_bw_out: save bandwidth used by FS/LS OUT eps in each uframes
* @fs_bus_bw_in: save bandwidth used by FS/LS IN eps in each uframes
* @ls_bus_bw: save bandwidth used by LS eps in each uframes
@@ -51,7 +52,7 @@ struct mu3h_sch_tt {
};
/**
- * struct mu3h_sch_bw_info: schedule information for bandwidth domain
+ * struct mu3h_sch_bw_info - schedule information for bandwidth domain
*
* @bus_bw: array to keep track of bandwidth already used at each uframes
*
@@ -63,7 +64,7 @@ struct mu3h_sch_bw_info {
};
/**
- * struct mu3h_sch_ep_info: schedule information for endpoint
+ * struct mu3h_sch_ep_info - schedule information for endpoint
*
* @esit: unit is 125us, equal to 2 << Interval field in ep-context
* @num_esit: number of @esit in a period
@@ -77,6 +78,7 @@ struct mu3h_sch_bw_info {
* @ep_type: endpoint type
* @maxpkt: max packet size of endpoint
* @ep: address of usb_host_endpoint struct
+ * @speed: usb device speed
* @allocated: the bandwidth is already allocated from bus_bw
* @offset: which uframe of the interval that transfer should be
* scheduled first time within the interval
@@ -125,7 +127,7 @@ struct mu3h_sch_ep_info {
#define MU3C_U2_PORT_MAX 5
/**
- * struct mu3c_ippc_regs: MTK ssusb ip port control registers
+ * struct mu3c_ippc_regs - MTK ssusb ip port control registers
* @ip_pw_ctr0~3: ip power and clock control registers
* @ip_pw_sts1~2: ip power and clock status registers
* @ip_xhci_cap: ip xHCI capability register
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5c8ab519f497..585b2f3117b0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -582,6 +582,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (!usb_hcd_is_primary_hcd(hcd))
return 0;
+ xhci->allow_single_roothub = 1;
+
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_acpi_rtd3_enable(pdev);
@@ -637,7 +639,6 @@ int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id)
xhci = hcd_to_xhci(hcd);
xhci->reset = reset;
- xhci->allow_single_roothub = 1;
if (!xhci_has_one_roothub(xhci)) {
xhci->shared_hcd = usb_create_shared_hcd(&xhci_pci_hc_driver, &dev->dev,
pci_name(dev), hcd);
@@ -895,9 +896,9 @@ static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
if (!(xhci->quirks & XHCI_RESET_TO_DEFAULT))
return 0;
- for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
+ for (i = 0; i < xhci->max_ports; i++) {
port = &xhci->hw_ports[i];
- portsc = readl(port->addr);
+ portsc = xhci_portsc_readl(port);
if ((portsc & PORT_PLS_MASK) != XDEV_U3)
continue;
@@ -918,7 +919,7 @@ static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
xhci_dbg(xhci, "port %d-%d in U3 without wakeup, disable it\n",
port->rhub->hcd->self.busnum, port->hcd_portnum + 1);
portsc = xhci_port_state_to_neutral(portsc);
- writel(portsc | PORT_PE, port->addr);
+ xhci_portsc_writel(port, portsc | PORT_PE);
}
return 0;
diff --git a/drivers/usb/host/xhci-port.h b/drivers/usb/host/xhci-port.h
index f19efb966d18..889b5fb0fcd8 100644
--- a/drivers/usb/host/xhci-port.h
+++ b/drivers/usb/host/xhci-port.h
@@ -144,9 +144,14 @@
#define PORT_TEST_MODE_SHIFT 28
/* USB3 Protocol PORTLI Port Link Information */
+#define PORT_LEC(p) ((p) & 0xffff)
#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
+/* eUSB2v2 protocol PORTLI Port Link information, RsvdP for normal USB2 */
+#define PORTLI_RDR(p) ((p) & 0xf)
+#define PORTLI_TDR(p) (((p) >> 4) & 0xf)
+
/* USB2 Protocol PORTHLPMC */
#define PORT_HIRDM(p)((p) & 3)
#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 8e209aa33ea7..2247fd9dd163 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,6 +82,23 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
return seg->dma + (segment_offset * sizeof(*trb));
}
+static union xhci_trb *xhci_dma_to_trb(struct xhci_segment *start_seg,
+ dma_addr_t dma,
+ struct xhci_segment **match_seg)
+{
+ struct xhci_segment *seg;
+
+ xhci_for_each_ring_seg(start_seg, seg) {
+ if (in_range(dma, seg->dma, TRB_SEGMENT_SIZE)) {
+ if (match_seg)
+ *match_seg = seg;
+ return &seg->trbs[(dma - seg->dma) / sizeof(union xhci_trb)];
+ }
+ }
+
+ return NULL;
+}
+
static bool trb_is_noop(union xhci_trb *trb)
{
return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
@@ -128,11 +145,11 @@ static void inc_td_cnt(struct urb *urb)
urb_priv->num_tds_done++;
}
-static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
+static void trb_to_noop(union xhci_trb *trb, u32 noop_type, bool unchain_links)
{
if (trb_is_link(trb)) {
- /* unchain chained link TRBs */
- trb->link.control &= cpu_to_le32(~TRB_CHAIN);
+ if (unchain_links)
+ trb->link.control &= cpu_to_le32(~TRB_CHAIN);
} else {
trb->generic.field[0] = 0;
trb->generic.field[1] = 0;
@@ -143,6 +160,11 @@ static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
}
}
+static unsigned int trb_to_pos(struct xhci_segment *seg, union xhci_trb *trb)
+{
+ return seg->num * TRBS_PER_SEGMENT + (trb - seg->trbs);
+}
+
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
* TRB is in a new segment. This does not skip over link TRBs, and it does not
* effect the ring dequeue or enqueue pointers.
@@ -282,55 +304,34 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
inc_enq_past_link(xhci, ring, chain);
}
-/*
- * If the suspect DMA address is a TRB in this TD, this function returns that
- * TRB's segment. Otherwise it returns 0.
- */
-static struct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t suspect_dma)
+static bool dma_in_range(dma_addr_t dma,
+ struct xhci_segment *start_seg, union xhci_trb *start_trb,
+ struct xhci_segment *end_seg, union xhci_trb *end_trb)
{
- dma_addr_t start_dma;
- dma_addr_t end_seg_dma;
- dma_addr_t end_trb_dma;
- struct xhci_segment *cur_seg;
+ unsigned int pos, start, end;
+ struct xhci_segment *pos_seg;
+ union xhci_trb *pos_trb = xhci_dma_to_trb(start_seg, dma, &pos_seg);
+
+ /* Is the trb dma address even part of the whole ring? */
+ if (!pos_trb)
+ return false;
- start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
- cur_seg = td->start_seg;
+ pos = trb_to_pos(pos_seg, pos_trb);
+ start = trb_to_pos(start_seg, start_trb);
+ end = trb_to_pos(end_seg, end_trb);
- do {
- if (start_dma == 0)
- return NULL;
- /* We may get an event for a Link TRB in the middle of a TD */
- end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
- &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
- /* If the end TRB isn't in this segment, this is set to 0 */
- end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
-
- if (end_trb_dma > 0) {
- /* The end TRB is in this segment, so suspect should be here */
- if (start_dma <= end_trb_dma) {
- if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
- return cur_seg;
- } else {
- /* Case for one segment with
- * a TD wrapped around to the top
- */
- if ((suspect_dma >= start_dma &&
- suspect_dma <= end_seg_dma) ||
- (suspect_dma >= cur_seg->dma &&
- suspect_dma <= end_trb_dma))
- return cur_seg;
- }
- return NULL;
- }
- /* Might still be somewhere in this segment */
- if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
- return cur_seg;
+ /* end position is smaller than start, search range wraps around */
+ if (end < start)
+ return !(pos > end && pos < start);
- cur_seg = cur_seg->next;
- start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
- } while (cur_seg != td->start_seg);
+ return (pos >= start && pos <= end);
+}
- return NULL;
+/* If the suspect DMA address is a TRB in this TD, this function returns true */
+static bool trb_in_td(struct xhci_td *td, dma_addr_t suspect_dma)
+{
+ return dma_in_range(suspect_dma, td->start_seg, td->start_trb,
+ td->end_seg, td->end_trb);
}
/*
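The dma_in_range() rewrite above reduces the old segment-walking trb_in_td() to a comparison of linear ring positions (segment number * TRBS_PER_SEGMENT + TRB index). The only subtle case is a TD that wraps past the end of the ring: then end < start and only the gap between them lies outside the TD. A standalone illustration of just that comparison, with made-up positions:

#include <stdbool.h>
#include <stdio.h>

/* Same comparison as dma_in_range() above, on precomputed ring positions. */
static bool pos_in_range(unsigned int pos, unsigned int start, unsigned int end)
{
	/* end < start: the TD wraps around the ring, so only the
	 * gap (end, start) lies outside the TD */
	if (end < start)
		return !(pos > end && pos < start);

	return pos >= start && pos <= end;
}

int main(void)
{
	/* non-wrapping TD covering positions 10..20 */
	printf("%d %d\n", pos_in_range(15, 10, 20), pos_in_range(25, 10, 20));
	/* wrapping TD: starts at 250, ends at 5 */
	printf("%d %d\n", pos_in_range(2, 250, 5), pos_in_range(100, 250, 5));
	return 0;	/* prints "1 0" twice */
}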
@@ -434,7 +435,7 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
{
- return mod_delayed_work(system_wq, &xhci->cmd_timer,
+ return mod_delayed_work(system_percpu_wq, &xhci->cmd_timer,
msecs_to_jiffies(xhci->current_cmd->timeout_ms));
}
@@ -465,7 +466,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
i_cmd->command_trb);
- trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);
+ trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP, false);
/*
* caller waiting for completion is called when command
@@ -797,13 +798,18 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
* (The last TRB actually points to the ring enqueue pointer, which is not part
* of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
*/
-static void td_to_noop(struct xhci_td *td, bool flip_cycle)
+static void td_to_noop(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+ struct xhci_td *td, bool flip_cycle)
{
+ bool unchain_links;
struct xhci_segment *seg = td->start_seg;
union xhci_trb *trb = td->start_trb;
+ /* link TRBs should now be unchained, but some old HCs expect otherwise */
+ unchain_links = !xhci_link_chain_quirk(xhci, ep->ring ? ep->ring->type : TYPE_STREAM);
+
while (1) {
- trb_to_noop(trb, TRB_TR_NOOP);
+ trb_to_noop(trb, TRB_TR_NOOP, unchain_links);
/* flip cycle if asked to */
if (flip_cycle && trb != td->start_trb && trb != td->end_trb)
@@ -1091,16 +1097,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
"Found multiple active URBs %p and %p in stream %u?\n",
td->urb, cached_td->urb,
td->urb->stream_id);
- td_to_noop(cached_td, false);
+ td_to_noop(xhci, ep, cached_td, false);
cached_td->cancel_status = TD_CLEARED;
}
- td_to_noop(td, false);
+ td_to_noop(xhci, ep, td, false);
td->cancel_status = TD_CLEARING_CACHE;
cached_td = td;
break;
}
} else {
- td_to_noop(td, false);
+ td_to_noop(xhci, ep, td, false);
td->cancel_status = TD_CLEARED;
}
}
@@ -1125,7 +1131,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
continue;
xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
td->urb);
- td_to_noop(td, false);
+ td_to_noop(xhci, ep, td, false);
td->cancel_status = TD_CLEARED;
}
}
@@ -1388,7 +1394,7 @@ void xhci_hc_died(struct xhci_hcd *xhci)
xhci_cleanup_command_queue(xhci);
/* return any pending urbs, remove may be waiting for them */
- for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+ for (i = 0; i <= xhci->max_slots; i++) {
if (!xhci->devs[i])
continue;
for (j = 0; j < 31; j++)
@@ -1988,7 +1994,6 @@ static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
struct usb_hcd *hcd;
u32 port_id;
u32 portsc, cmd_reg;
- int max_ports;
unsigned int hcd_portnum;
struct xhci_bus_state *bus_state;
bool bogus_port_status = false;
@@ -2000,9 +2005,8 @@ static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
"WARN: xHC returned failed port status event\n");
port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
- max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
- if ((port_id <= 0) || (port_id > max_ports)) {
+ if ((port_id <= 0) || (port_id > xhci->max_ports)) {
xhci_warn(xhci, "Port change event with invalid port ID %d\n",
port_id);
return;
@@ -2026,7 +2030,7 @@ static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
hcd = port->rhub->hcd;
bus_state = &port->rhub->bus_state;
hcd_portnum = port->hcd_portnum;
- portsc = readl(port->addr);
+ portsc = xhci_portsc_readl(port);
xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
@@ -2179,24 +2183,31 @@ static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
* External device side is also halted in functional stall cases. Class driver
* will clear the device halt with a CLEAR_FEATURE(ENDPOINT_HALT) request later.
*/
-static bool xhci_halted_host_endpoint(struct xhci_ep_ctx *ep_ctx, unsigned int comp_code)
+static bool xhci_halted_host_endpoint(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep_ctx,
+ unsigned int comp_code)
{
- /* Stall halts both internal and device side endpoint */
- if (comp_code == COMP_STALL_ERROR)
- return true;
+ int ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
- /* TRB completion codes that may require internal halt cleanup */
- if (comp_code == COMP_USB_TRANSACTION_ERROR ||
- comp_code == COMP_BABBLE_DETECTED_ERROR ||
- comp_code == COMP_SPLIT_TRANSACTION_ERROR)
+ switch (comp_code) {
+ case COMP_STALL_ERROR:
+ /* on xHCI this always halts, including protocol stall */
+ return true;
+ case COMP_BABBLE_DETECTED_ERROR:
/*
* The 0.95 spec says a babbling control endpoint is not halted.
* The 0.96 spec says it is. Some HW claims to be 0.95
* compliant, but it halts the control endpoint anyway.
* Check endpoint context if endpoint is halted.
*/
- if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
- return true;
+ if (xhci->hci_version <= 0x95 && ep_type == CTRL_EP)
+ return GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED;
+
+ fallthrough;
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_SPLIT_TRANSACTION_ERROR:
+ /* these errors halt all non-isochronous endpoints */
+ return ep_type != ISOC_IN_EP && ep_type != ISOC_OUT_EP;
+ }
return false;
}
@@ -2233,41 +2244,9 @@ static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
* the ring dequeue pointer or take this TD off any lists yet.
*/
return;
- case COMP_USB_TRANSACTION_ERROR:
- case COMP_BABBLE_DETECTED_ERROR:
- case COMP_SPLIT_TRANSACTION_ERROR:
- /*
- * If endpoint context state is not halted we might be
- * racing with a reset endpoint command issued by a unsuccessful
- * stop endpoint completion (context error). In that case the
- * td should be on the cancelled list, and EP_HALTED flag set.
- *
- * Or then it's not halted due to the 0.95 spec stating that a
- * babbling control endpoint should not halt. The 0.96 spec
- * again says it should. Some HW claims to be 0.95 compliant,
- * but it halts the control endpoint anyway.
- */
- if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
- /*
- * If EP_HALTED is set and TD is on the cancelled list
- * the TD and dequeue pointer will be handled by reset
- * ep command completion
- */
- if ((ep->ep_state & EP_HALTED) &&
- !list_empty(&td->cancelled_td_list)) {
- xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
- (unsigned long long)xhci_trb_virt_to_dma(
- td->start_seg, td->start_trb));
- return;
- }
- /* endpoint not halted, don't reset it */
- break;
- }
- /* Almost same procedure as for STALL_ERROR below */
- xhci_clear_hub_tt_buffer(xhci, td, ep);
- xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
- return;
- case COMP_STALL_ERROR:
+ }
+
+ if (xhci_halted_host_endpoint(xhci, ep_ctx, trb_comp_code)) {
/*
* xhci internal endpoint state will go to a "halt" state for
* any stall, including default control pipe protocol stall.
@@ -2278,14 +2257,12 @@ static void finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
* stall later. Hub TT buffer should only be cleared for FS/LS
* devices behind HS hubs for functional stalls.
*/
- if (ep->ep_index != 0)
+ if (!(ep->ep_index == 0 && trb_comp_code == COMP_STALL_ERROR))
xhci_clear_hub_tt_buffer(xhci, td, ep);
xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
return; /* xhci_handle_halted_endpoint marked td cancelled */
- default:
- break;
}
xhci_dequeue_td(xhci, td, ep_ring, td->status);
@@ -2362,7 +2339,7 @@ static void process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
case COMP_STOPPED_LENGTH_INVALID:
goto finish_td;
default:
- if (!xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
+ if (!xhci_halted_host_endpoint(xhci, ep_ctx, trb_comp_code))
break;
xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
trb_comp_code, ep->ep_index);
@@ -2658,7 +2635,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
int ep_index;
struct xhci_td *td = NULL;
dma_addr_t ep_trb_dma;
- struct xhci_segment *ep_seg;
union xhci_trb *ep_trb;
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
@@ -2689,6 +2665,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (!ep_ring)
return handle_transferless_tx_event(xhci, ep, trb_comp_code);
+ /* find the transfer TRB this event points to */
+ ep_trb = xhci_dma_to_trb(ep_ring->deq_seg, ep_trb_dma, NULL);
+
/* Look for common error cases */
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
@@ -2862,10 +2841,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td = list_first_entry(&ep_ring->td_list, struct xhci_td,
td_list);
- /* Is this a TRB in the currently executing TD? */
- ep_seg = trb_in_td(td, ep_trb_dma);
-
- if (!ep_seg) {
+ /* Is this TRB not part of the currently executing TD? */
+ if (!trb_in_td(td, ep_trb_dma)) {
if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
/* this event is unlikely to match any TD, don't skip them all */
@@ -2948,7 +2925,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (ring_xrun_event)
return 0;
- ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma);
/*
@@ -2973,7 +2949,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
return 0;
check_endpoint_halted:
- if (xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
+ if (xhci_halted_host_endpoint(xhci, ep_ctx, trb_comp_code))
xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
return 0;
@@ -3985,6 +3961,16 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
return total_packet_count - 1;
}
+/* Returns the Isochronous Scheduling Threshold in Microframes. 1 Frame is 8 Microframes. */
+static int xhci_ist_microframes(struct xhci_hcd *xhci)
+{
+ int ist = HCS_IST_VALUE(xhci->hcs_params2);
+
+ if (xhci->hcs_params2 & HCS_IST_UNIT)
+ ist *= 8;
+ return ist;
+}
+
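
xhci_ist_microframes() folds the split IST field into one microframe count; per the comment removed further down in this file, IST[2:0] is the count and IST[3] selects frames (one frame being eight microframes). A standalone sketch of that conversion, operating on an already-extracted field rather than on hcs_params2 via HCS_IST_VALUE/HCS_IST_UNIT:

#include <stdint.h>
#include <stdio.h>

/* IST[2:0] is a count, IST[3] picks the unit: 0 = microframes, 1 = frames,
 * and one frame is eight microframes.
 */
static int ist_to_microframes(uint32_t ist_field)
{
	int ist = ist_field & 0x7;

	if (ist_field & 0x8)
		ist *= 8;
	return ist;
}

int main(void)
{
	printf("%d\n", ist_to_microframes(0x3));	/* 3 microframes */
	printf("%d\n", ist_to_microframes(0xb));	/* 3 frames = 24 microframes */
	return 0;
}
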
/*
* Calculates Frame ID field of the isochronous TRB identifies the
* target frame that the Interval associated with this Isochronous
@@ -4004,17 +3990,7 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
else
start_frame = (urb->start_frame + index * urb->interval) >> 3;
- /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
- *
- * If bit [3] of IST is cleared to '0', software can add a TRB no
- * later than IST[2:0] Microframes before that TRB is scheduled to
- * be executed.
- * If bit [3] of IST is set to '1', software can add a TRB no later
- * than IST[2:0] Frames before that TRB is scheduled to be executed.
- */
- ist = HCS_IST(xhci->hcs_params2) & 0x7;
- if (HCS_IST(xhci->hcs_params2) & (1 << 3))
- ist <<= 3;
+ ist = xhci_ist_microframes(xhci);
/* Software shall not schedule an Isoch TD with a Frame ID value that
* is less than the Start Frame ID or greater than the End Frame ID,
@@ -4159,7 +4135,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* use SIA as default, if frame id is used overwrite it */
sia_frame_id = TRB_SIA;
if (!(urb->transfer_flags & URB_ISO_ASAP) &&
- HCC_CFC(xhci->hcc_params)) {
+ (xhci->hcc_params & HCC_CFC)) {
frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
if (frame_id >= 0)
sia_frame_id = TRB_FRAME_ID(frame_id);
@@ -4243,7 +4219,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
/* store the next frame id */
- if (HCC_CFC(xhci->hcc_params))
+ if (xhci->hcc_params & HCC_CFC)
xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
@@ -4268,7 +4244,7 @@ cleanup:
*/
urb_priv->td[0].end_trb = ep_ring->enqueue;
/* Every TRB except the first & last will have its cycle bit flipped. */
- td_to_noop(&urb_priv->td[0], true);
+ td_to_noop(xhci, xep, &urb_priv->td[0], true);
/* Reset the ring enqueue back to the first TRB and its cycle bit. */
ep_ring->enqueue = urb_priv->td[0].start_trb;
@@ -4322,7 +4298,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
check_interval(urb, ep_ctx);
/* Calculate the start frame and put it in urb->start_frame. */
- if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
+ if ((xhci->hcc_params & HCC_CFC) && !list_empty(&ep_ring->td_list)) {
if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
urb->start_frame = xep->next_frame_id;
goto skip_start_over;
@@ -4335,9 +4311,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* Round up to the next frame and consider the time before trb really
* gets scheduled by hardware.
*/
- ist = HCS_IST(xhci->hcs_params2) & 0x7;
- if (HCS_IST(xhci->hcs_params2) & (1 << 3))
- ist <<= 3;
+ ist = xhci_ist_microframes(xhci);
start_frame += ist + XHCI_CFC_DELAY;
start_frame = roundup(start_frame, 8);
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 5255b1002893..31ccced5125e 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1399,7 +1399,6 @@ static void tegra_xhci_id_work(struct work_struct *work)
}
tegra_xhci_set_port_power(tegra, true, true);
- pm_runtime_mark_last_busy(tegra->dev);
} else {
if (tegra->otg_usb3_port >= 0)
@@ -2036,7 +2035,7 @@ static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
u32 value;
for (i = 0; i < hub->num_ports; i++) {
- value = readl(hub->ports[i]->addr);
+ value = xhci_portsc_readl(hub->ports[i]);
if ((value & PORT_PE) == 0)
continue;
@@ -2162,7 +2161,7 @@ static void tegra_xhci_enable_phy_sleepwalk_wake(struct tegra_xusb *tegra)
if (!is_host_mode_phy(tegra, i, j))
continue;
- portsc = readl(rhub->ports[index]->addr);
+ portsc = xhci_portsc_readl(rhub->ports[index]);
speed = tegra_xhci_portsc_to_speed(tegra, portsc);
tegra_xusb_padctl_enable_phy_sleepwalk(padctl, phy, speed);
tegra_xusb_padctl_enable_phy_wake(padctl, phy);
@@ -2257,7 +2256,7 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool is_auto_resume)
for (i = 0; i < xhci->usb2_rhub.num_ports; i++) {
if (!xhci->usb2_rhub.ports[i])
continue;
- portsc = readl(xhci->usb2_rhub.ports[i]->addr);
+ portsc = xhci_portsc_readl(xhci->usb2_rhub.ports[i]);
tegra->lp0_utmi_pad_mask &= ~BIT(i);
if (((portsc & PORT_PLS_MASK) == XDEV_U3) || ((portsc & DEV_SPEED_MASK) == XDEV_FS))
tegra->lp0_utmi_pad_mask |= BIT(i);
@@ -2790,7 +2789,7 @@ static int tegra_xhci_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value,
while (i--) {
if (!test_bit(i, &bus_state->resuming_ports))
continue;
- portsc = readl(ports[i]->addr);
+ portsc = xhci_portsc_readl(ports[i]);
if ((portsc & PORT_PLS_MASK) == XDEV_RESUME)
tegra_phy_xusb_utmi_pad_power_on(
tegra_xusb_get_phy(tegra, "usb2", (int) i));
@@ -2808,7 +2807,7 @@ static int tegra_xhci_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value,
if (!index || index > rhub->num_ports)
return -EPIPE;
ports = rhub->ports;
- portsc = readl(ports[port]->addr);
+ portsc = xhci_portsc_readl(ports[port]);
if (portsc & PORT_CONNECT)
tegra_phy_xusb_utmi_pad_power_on(phy);
}
@@ -2827,7 +2826,7 @@ static int tegra_xhci_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value,
if ((type_req == ClearPortFeature) && (value == USB_PORT_FEAT_C_CONNECTION)) {
ports = rhub->ports;
- portsc = readl(ports[port]->addr);
+ portsc = xhci_portsc_readl(ports[port]);
if (!(portsc & PORT_CONNECT)) {
/* We don't suspend the PAD while HNP role swap happens on the OTG
* port
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 9abc904f1749..724cba2dbb78 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -71,29 +71,20 @@ DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
);
DECLARE_EVENT_CLASS(xhci_log_ctx,
- TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
- unsigned int ep_num),
- TP_ARGS(xhci, ctx, ep_num),
+ TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx),
+ TP_ARGS(xhci, ctx),
TP_STRUCT__entry(
__field(int, ctx_64)
__field(unsigned, ctx_type)
__field(dma_addr_t, ctx_dma)
__field(u8 *, ctx_va)
- __field(unsigned, ctx_ep_num)
- __dynamic_array(u32, ctx_data,
- ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
- ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
),
TP_fast_assign(
- __entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+ __entry->ctx_64 = xhci->hcc_params & HCC_64BYTE_CONTEXT;
__entry->ctx_type = ctx->type;
__entry->ctx_dma = ctx->dma;
__entry->ctx_va = ctx->bytes;
- __entry->ctx_ep_num = ep_num;
- memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
- ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
- ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
),
TP_printk("ctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
__entry->ctx_64, __entry->ctx_type,
@@ -102,9 +93,8 @@ DECLARE_EVENT_CLASS(xhci_log_ctx,
);
DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
- TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
- unsigned int ep_num),
- TP_ARGS(xhci, ctx, ep_num)
+ TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx),
+ TP_ARGS(xhci, ctx)
);
DECLARE_EVENT_CLASS(xhci_log_trb,
@@ -575,6 +565,11 @@ DEFINE_EVENT(xhci_log_portsc, xhci_hub_status_data,
TP_ARGS(port, portsc)
);
+DEFINE_EVENT(xhci_log_portsc, xhci_portsc_writel,
+ TP_PROTO(struct xhci_port *port, u32 portsc),
+ TP_ARGS(port, portsc)
+);
+
DECLARE_EVENT_CLASS(xhci_log_doorbell,
TP_PROTO(u32 slot, u32 doorbell),
TP_ARGS(slot, doorbell),
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 0cb45b95e4f5..57a74ffadc24 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -41,6 +41,19 @@ static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
+void xhci_portsc_writel(struct xhci_port *port, u32 val)
+{
+ trace_xhci_portsc_writel(port, val);
+ writel(val, &port->port_reg->portsc);
+}
+EXPORT_SYMBOL_GPL(xhci_portsc_writel);
+
+u32 xhci_portsc_readl(struct xhci_port *port)
+{
+ return readl(&port->port_reg->portsc);
+}
+EXPORT_SYMBOL_GPL(xhci_portsc_readl);
+
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
struct xhci_segment *seg;
@@ -237,7 +250,6 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
struct iommu_domain *domain;
int err, i;
u64 val;
- u32 intrs;
/*
* Some Renesas controllers get into a weird state if they are
@@ -278,10 +290,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
if (upper_32_bits(val))
xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
- intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
- ARRAY_SIZE(xhci->run_regs->ir_set));
-
- for (i = 0; i < intrs; i++) {
+ for (i = 0; i < xhci->max_interrupters; i++) {
struct xhci_intr_reg __iomem *ir;
ir = &xhci->run_regs->ir_set[i];
@@ -373,7 +382,7 @@ static void compliance_mode_recovery(struct timer_list *t)
return;
for (i = 0; i < rhub->num_ports; i++) {
- temp = readl(rhub->ports[i]->addr);
+ temp = xhci_portsc_readl(rhub->ports[i]);
if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
/*
* Compliance Mode Detected. Letting USB Core
@@ -471,15 +480,13 @@ static void xhci_hcd_page_size(struct xhci_hcd *xhci)
static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci)
{
u32 config_reg;
- u32 max_slots;
- max_slots = HCS_MAX_SLOTS(xhci->hcs_params1);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots",
- max_slots);
+ xhci->max_slots);
config_reg = readl(&xhci->op_regs->config_reg);
config_reg &= ~HCS_SLOTS_MASK;
- config_reg |= max_slots;
+ config_reg |= xhci->max_slots;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x",
config_reg);
@@ -896,7 +903,7 @@ static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
spin_lock_irqsave(&xhci->lock, flags);
for (i = 0; i < rhub->num_ports; i++) {
- portsc = readl(rhub->ports[i]->addr);
+ portsc = xhci_portsc_readl(rhub->ports[i]);
t1 = xhci_port_state_to_neutral(portsc);
t2 = t1;
@@ -909,7 +916,7 @@ static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
t2 |= PORT_CSC;
if (t1 != t2) {
- writel(t2, rhub->ports[i]->addr);
+ xhci_portsc_writel(rhub->ports[i], t2);
xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
rhub->hcd->self.busnum, i + 1, portsc, t2);
}
@@ -936,7 +943,7 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci)
port_index = xhci->usb2_rhub.num_ports;
ports = xhci->usb2_rhub.ports;
while (port_index--) {
- portsc = readl(ports[port_index]->addr);
+ portsc = xhci_portsc_readl(ports[port_index]);
if (portsc & PORT_CHANGE_MASK ||
(portsc & PORT_PLS_MASK) == XDEV_RESUME)
return true;
@@ -944,7 +951,7 @@ static bool xhci_pending_portevent(struct xhci_hcd *xhci)
port_index = xhci->usb3_rhub.num_ports;
ports = xhci->usb3_rhub.ports;
while (port_index--) {
- portsc = readl(ports[port_index]->addr);
+ portsc = xhci_portsc_readl(ports[port_index]);
if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
(portsc & PORT_PLS_MASK) == XDEV_RESUME)
return true;
@@ -4222,8 +4229,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_err(xhci, "Error while assigning device slot ID: %s\n",
xhci_trb_comp_code_string(command->status));
xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
- HCS_MAX_SLOTS(
- readl(&xhci->cap_regs->hcs_params1)));
+ xhci->max_slots);
xhci_free_command(xhci, command);
return 0;
}
@@ -4366,8 +4372,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
ctrl_ctx->drop_flags = 0;
- trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
- le32_to_cpu(slot_ctx->dev_info) >> 27);
+ trace_xhci_address_ctx(xhci, virt_dev->in_ctx);
trace_xhci_address_ctrl_ctx(ctrl_ctx);
spin_lock_irqsave(&xhci->lock, flags);
@@ -4427,7 +4432,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
xhci_err(xhci,
"ERROR: unexpected setup %s command completion code 0x%x.\n",
act, command->status);
- trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
+ trace_xhci_address_ctx(xhci, virt_dev->out_ctx);
ret = -EINVAL;
break;
}
@@ -4445,14 +4450,12 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Output Context DMA address = %#08llx",
(unsigned long long)virt_dev->out_ctx->dma);
- trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
- le32_to_cpu(slot_ctx->dev_info) >> 27);
+ trace_xhci_address_ctx(xhci, virt_dev->in_ctx);
/*
* USB core uses address 1 for the roothubs, so we add one to the
* address given back to us by the HC.
*/
- trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
- le32_to_cpu(slot_ctx->dev_info) >> 27);
+ trace_xhci_address_ctx(xhci, virt_dev->out_ctx);
/* Zero the input context control for later use */
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
@@ -4636,7 +4639,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_port **ports;
- __le32 __iomem *pm_addr, *hlpm_addr;
+ struct xhci_port_regs __iomem *port_reg;
u32 pm_val, hlpm_val, field;
unsigned int port_num;
unsigned long flags;
@@ -4661,9 +4664,8 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
ports = xhci->usb2_rhub.ports;
port_num = udev->portnum - 1;
- pm_addr = ports[port_num]->addr + PORTPMSC;
- pm_val = readl(pm_addr);
- hlpm_addr = ports[port_num]->addr + PORTHLPMC;
+ port_reg = ports[port_num]->port_reg;
+ pm_val = readl(&port_reg->portpmsc);
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
str_enable_disable(enable), port_num + 1);
@@ -4692,30 +4694,30 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
spin_lock_irqsave(&xhci->lock, flags);
hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
- writel(hlpm_val, hlpm_addr);
+ writel(hlpm_val, &port_reg->porthlmpc);
/* flush write */
- readl(hlpm_addr);
+ readl(&port_reg->porthlmpc);
} else {
hird = xhci_calculate_hird_besl(xhci, udev);
}
pm_val &= ~PORT_HIRD_MASK;
pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
- writel(pm_val, pm_addr);
- pm_val = readl(pm_addr);
+ writel(pm_val, &port_reg->portpmsc);
+ pm_val = readl(&port_reg->portpmsc);
pm_val |= PORT_HLE;
- writel(pm_val, pm_addr);
+ writel(pm_val, &port_reg->portpmsc);
/* flush write */
- readl(pm_addr);
+ readl(&port_reg->portpmsc);
} else {
pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
- writel(pm_val, pm_addr);
+ writel(pm_val, &port_reg->portpmsc);
/* flush write */
- readl(pm_addr);
+ readl(&port_reg->portpmsc);
if (udev->usb2_hw_lpm_besl_capable) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_change_max_exit_latency(xhci, udev, 0);
- readl_poll_timeout(ports[port_num]->addr, pm_val,
+ readl_poll_timeout(&ports[port_num]->port_reg->portsc, pm_val,
(pm_val & PORT_PLS_MASK) == XDEV_U0,
100, 10000);
return 0;
@@ -5408,6 +5410,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
*/
struct device *dev = hcd->self.sysdev;
int retval;
+ u32 hcs_params1;
/* Accept arbitrarily long scatter-gather lists */
hcd->self.sg_tablesize = ~0;
@@ -5433,7 +5436,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
xhci->run_regs = hcd->regs +
(readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
/* Cache read-only capability registers */
- xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
+ hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
@@ -5441,10 +5444,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (xhci->hci_version > 0x100)
xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
+ xhci->max_slots = HCS_MAX_SLOTS(hcs_params1);
+ xhci->max_ports = min(HCS_MAX_PORTS(hcs_params1), MAX_HC_PORTS);
/* xhci-plat or xhci-pci might have set max_interrupters already */
- if ((!xhci->max_interrupters) ||
- xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
- xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
+ if (!xhci->max_interrupters)
+ xhci->max_interrupters = min(HCS_MAX_INTRS(hcs_params1), MAX_HC_INTRS);
+ else if (xhci->max_interrupters > HCS_MAX_INTRS(hcs_params1))
+ xhci->max_interrupters = HCS_MAX_INTRS(hcs_params1);
xhci->quirks |= quirks;
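
The interrupter count is now derived from two ceilings: what the hardware reports in HCSPARAMS1 and the driver-side MAX_HC_INTRS, and a preset from the platform glue is kept but never allowed past the hardware limit. A standalone sketch of that policy with made-up limits:

#include <stdio.h>

#define HW_MAX_INTRS	64	/* stand-in for HCS_MAX_INTRS(hcs_params1) */
#define DRV_MAX_INTRS	128	/* stand-in for MAX_HC_INTRS */

static int pick_interrupters(int preset)
{
	if (!preset)
		return HW_MAX_INTRS < DRV_MAX_INTRS ? HW_MAX_INTRS : DRV_MAX_INTRS;
	return preset > HW_MAX_INTRS ? HW_MAX_INTRS : preset;
}

int main(void)
{
	printf("%d\n", pick_interrupters(0));	/* 64: no preset, hardware limit */
	printf("%d\n", pick_interrupters(8));	/* 8: platform preset kept */
	printf("%d\n", pick_interrupters(200));	/* 64: clamped to the hardware limit */
	return 0;
}
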
@@ -5489,7 +5495,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
/* Set dma_mask and coherent_dma_mask to 64-bits,
* if xHC supports 64-bit addressing */
- if (HCC_64BIT_ADDR(xhci->hcc_params) &&
+ if ((xhci->hcc_params & HCC_64BIT_ADDR) &&
!dma_set_mask(dev, DMA_BIT_MASK(64))) {
xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
@@ -5663,8 +5669,8 @@ static int __init xhci_hcd_init(void)
BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
- /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
- BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
+ /* xhci_run_regs has eight fields and embeds 1024 xhci_intr_regs */
+ BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*1024)*32/8);
if (usb_disabled())
return -ENODEV;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 58a51f09cceb..2b0796f6d00e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -34,8 +34,16 @@
/* Max number of USB devices for any host controller - limit in section 6.1 */
#define MAX_HC_SLOTS 256
-/* Section 5.3.3 - MaxPorts */
+/*
+ * Max Number of Ports. xHCI specification section 5.3.3
+ * Valid values are in the range of 1 to 255.
+ */
#define MAX_HC_PORTS 127
+/*
+ * Max number of Interrupter Register Sets. xHCI specification section 5.3.3
+ * Valid values are in the range of 1 to 1024.
+ */
+#define MAX_HC_INTRS 128
/*
* xHCI register interface.
@@ -66,13 +74,19 @@ struct xhci_cap_regs {
/* Reserved up to (CAPLENGTH - 0x1C) */
};
-/* Number of registers per port */
-#define NUM_PORT_REGS 4
-
-#define PORTSC 0
-#define PORTPMSC 1
-#define PORTLI 2
-#define PORTHLPMC 3
+/*
+ * struct xhci_port_regs - Host Controller USB Port Register Set. xHCI spec 5.4.8
+ * @portsc: Port Status and Control
+ * @portpmsc: Port Power Management Status and Control
+ * @portli: Port Link Info
+ * @porthlmpc: Port Hardware LPM Control
+ */
+struct xhci_port_regs {
+ __le32 portsc;
+ __le32 portpmsc;
+ __le32 portli;
+ __le32 porthlmpc;
+};
/**
* struct xhci_op_regs - xHCI Host Controller Operational Registers.
@@ -85,16 +99,7 @@ struct xhci_cap_regs {
* @cmd_ring: CRP - 64-bit Command Ring Pointer
* @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer
* @config_reg: CONFIG - Configure Register
- * @port_status_base: PORTSCn - base address for Port Status and Control
- * Each port has a Port Status and Control register,
- * followed by a Port Power Management Status and Control
- * register, a Port Link Info register, and a reserved
- * register.
- * @port_power_base: PORTPMSCn - base address for
- * Port Power Management Status and Control
- * @port_link_base: PORTLIn - base address for Port Link Info (current
- * Link PM state and control) for USB 2.1 and USB 3.0
- * devices.
+ * @port_regs: Port Register Sets, from 1 to MaxPorts (defined by HCSPARAMS1).
*/
struct xhci_op_regs {
__le32 command;
@@ -110,13 +115,7 @@ struct xhci_op_regs {
__le32 config_reg;
/* rsvd: offset 0x3C-3FF */
__le32 reserved4[241];
- /* port 1 registers, which serve as a base address for other ports */
- __le32 port_status_base;
- __le32 port_power_base;
- __le32 port_link_base;
- __le32 reserved5;
- /* registers for ports 2-255 */
- __le32 reserved6[NUM_PORT_REGS*254];
+ struct xhci_port_regs port_regs[];
};
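
With the per-port registers modelled as a flexible array of struct xhci_port_regs, port n's PORTSC is reached by plain struct indexing instead of the old __le32 base plus NUM_PORT_REGS * n arithmetic. A standalone layout sketch, using ordinary memory and u32 fields rather than MMIO and __le32:

#include <stdio.h>
#include <stdint.h>

struct port_regs {
	uint32_t portsc;
	uint32_t portpmsc;
	uint32_t portli;
	uint32_t porthlmpc;
};

int main(void)
{
	struct port_regs ports[4] = { 0 };

	/* each Port Register Set is four 32-bit registers, 16 bytes */
	printf("sizeof(struct port_regs) = %zu\n", sizeof(struct port_regs));
	/* so &ports[n].portsc sits 16 * n bytes past the first set */
	printf("port 2 PORTSC offset     = %td\n",
	       (char *)&ports[2].portsc - (char *)&ports[0].portsc);
	return 0;
}
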
/* USBCMD - USB command - command bitmasks */
@@ -284,7 +283,7 @@ struct xhci_intr_reg {
struct xhci_run_regs {
__le32 microframe_index;
__le32 rsvd[7];
- struct xhci_intr_reg ir_set[128];
+ struct xhci_intr_reg ir_set[1024];
};
/**
@@ -800,7 +799,6 @@ struct xhci_device_context_array {
/* private xHCD pointers */
dma_addr_t dma;
};
-/* TODO: write function to set the 64-bit device DMA address */
/*
* TODO: change this to be dynamically sized at HC mem init time since the HC
* might not be able to handle the maximum number of devices possible.
@@ -1474,7 +1472,7 @@ struct xhci_port_cap {
};
struct xhci_port {
- __le32 __iomem *addr;
+ struct xhci_port_regs __iomem *port_reg;
int hw_portnum;
int hcd_portnum;
struct xhci_hub *rhub;
@@ -1510,7 +1508,6 @@ struct xhci_hcd {
struct xhci_doorbell_array __iomem *dba;
/* Cached register copies of read-only HC data */
- __u32 hcs_params1;
__u32 hcs_params2;
__u32 hcs_params3;
__u32 hcc_params;
@@ -1521,6 +1518,8 @@ struct xhci_hcd {
/* packed release number */
u16 hci_version;
u16 max_interrupters;
+ u8 max_slots;
+ u8 max_ports;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
u32 page_size;
@@ -1961,6 +1960,8 @@ void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num);
int xhci_usb_endpoint_maxp(struct usb_device *udev,
struct usb_host_endpoint *host_ep);
+void xhci_portsc_writel(struct xhci_port *port, u32 val);
+u32 xhci_portsc_readl(struct xhci_port *port);
/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
@@ -2399,25 +2400,48 @@ static inline const char *xhci_decode_portsc(char *str, u32 portsc)
if (portsc == ~(u32)0)
return str;
- ret += sprintf(str + ret, "%s %s %s Link:%s PortSpeed:%d ",
- portsc & PORT_POWER ? "Powered" : "Powered-off",
- portsc & PORT_CONNECT ? "Connected" : "Not-connected",
- portsc & PORT_PE ? "Enabled" : "Disabled",
- xhci_portsc_link_state_string(portsc),
- DEV_PORT_SPEED(portsc));
+ ret += sprintf(str + ret, "Speed=%d ", DEV_PORT_SPEED(portsc));
+ ret += sprintf(str + ret, "Link=%s ", xhci_portsc_link_state_string(portsc));
+ /* RO/ROS: Read-only */
+ if (portsc & PORT_CONNECT)
+ ret += sprintf(str + ret, "CCS ");
if (portsc & PORT_OC)
- ret += sprintf(str + ret, "OverCurrent ");
+ ret += sprintf(str + ret, "OCA "); /* No set for USB2 ports */
+ if (portsc & PORT_CAS)
+ ret += sprintf(str + ret, "CAS ");
+ if (portsc & PORT_DEV_REMOVE)
+ ret += sprintf(str + ret, "DR ");
+
+ /* RWS; writing 1 sets the bit, writing 0 clears the bit. */
+ if (portsc & PORT_POWER)
+ ret += sprintf(str + ret, "PP ");
+ if (portsc & PORT_WKCONN_E)
+ ret += sprintf(str + ret, "WCE ");
+ if (portsc & PORT_WKDISC_E)
+ ret += sprintf(str + ret, "WDE ");
+ if (portsc & PORT_WKOC_E)
+ ret += sprintf(str + ret, "WOE ");
+
+ /* RW; writing 1 sets the bit, writing 0 clears the bit */
+ if (portsc & PORT_LINK_STROBE)
+ ret += sprintf(str + ret, "LWS "); /* LWS 0 write is ignored */
+
+ /* RW1S; writing 1 sets the bit, writing 0 has no effect */
if (portsc & PORT_RESET)
- ret += sprintf(str + ret, "In-Reset ");
+ ret += sprintf(str + ret, "PR ");
+ if (portsc & PORT_WR)
+ ret += sprintf(str + ret, "WPR "); /* RsvdZ for USB2 ports */
- ret += sprintf(str + ret, "Change: ");
+ /* RW1CS; writing 1 clears the bit, writing 0 has no effect. */
+ if (portsc & PORT_PE)
+ ret += sprintf(str + ret, "PED ");
if (portsc & PORT_CSC)
ret += sprintf(str + ret, "CSC ");
if (portsc & PORT_PEC)
- ret += sprintf(str + ret, "PEC ");
+ ret += sprintf(str + ret, "PEC "); /* No set for USB3 ports */
if (portsc & PORT_WRC)
- ret += sprintf(str + ret, "WRC ");
+ ret += sprintf(str + ret, "WRC "); /* RsvdZ for USB2 ports */
if (portsc & PORT_OCC)
ret += sprintf(str + ret, "OCC ");
if (portsc & PORT_RC)
@@ -2425,17 +2449,7 @@ static inline const char *xhci_decode_portsc(char *str, u32 portsc)
if (portsc & PORT_PLC)
ret += sprintf(str + ret, "PLC ");
if (portsc & PORT_CEC)
- ret += sprintf(str + ret, "CEC ");
- if (portsc & PORT_CAS)
- ret += sprintf(str + ret, "CAS ");
-
- ret += sprintf(str + ret, "Wake: ");
- if (portsc & PORT_WKCONN_E)
- ret += sprintf(str + ret, "WCE ");
- if (portsc & PORT_WKDISC_E)
- ret += sprintf(str + ret, "WDE ");
- if (portsc & PORT_WKOC_E)
- ret += sprintf(str + ret, "WOE ");
+ ret += sprintf(str + ret, "CEC "); /* RsvdZ for USB2 ports */
return str;
}
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 09ac6f1c985f..0b56b773dbdf 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -182,6 +182,7 @@ config USB_LJCA
config USB_USBIO
tristate "Intel USBIO Bridge support"
depends on USB && ACPI
+ depends on X86 || COMPILE_TEST
select AUXILIARY_BUS
help
This adds support for Intel USBIO drivers.
diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c
index 8e852f4b8262..47b38dcc2992 100644
--- a/drivers/usb/misc/apple-mfi-fastcharge.c
+++ b/drivers/usb/misc/apple-mfi-fastcharge.c
@@ -134,7 +134,6 @@ static int apple_mfi_fc_set_property(struct power_supply *psy,
ret = -EINVAL;
}
- pm_runtime_mark_last_busy(&mfi->udev->dev);
pm_runtime_put_autosuspend(&mfi->udev->dev);
return ret;
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 225863321dc4..45cff32656c6 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -444,9 +444,19 @@ static ssize_t chaoskey_read(struct file *file,
goto bail;
mutex_unlock(&dev->rng_lock);
- result = mutex_lock_interruptible(&dev->lock);
- if (result)
- goto bail;
+ if (file->f_flags & O_NONBLOCK) {
+ result = mutex_trylock(&dev->lock);
+ if (result == 0) {
+ result = -EAGAIN;
+ goto bail;
+ } else {
+ result = 0;
+ }
+ } else {
+ result = mutex_lock_interruptible(&dev->lock);
+ if (result)
+ goto bail;
+ }
if (dev->valid == dev->used) {
result = _chaoskey_fill(dev);
if (result < 0) {
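
The new O_NONBLOCK branch trades the interruptible lock for a trylock so a non-blocking read returns -EAGAIN instead of sleeping on a busy device. A standalone sketch of that split, with a pthread mutex standing in for dev->lock; note pthread_mutex_trylock() returns 0 on success, the opposite sense of the kernel's mutex_trylock():

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 with the lock held, or -EAGAIN when nonblock is set and the
 * lock is already taken; signal handling is left out of this stand-in.
 */
static int grab_dev_lock(bool nonblock)
{
	if (nonblock)
		return pthread_mutex_trylock(&dev_lock) ? -EAGAIN : 0;
	return -pthread_mutex_lock(&dev_lock);
}

int main(void)
{
	printf("first grab:  %d\n", grab_dev_lock(true));	/* 0: lock was free */
	printf("second grab: %d\n", grab_dev_lock(true));	/* -EAGAIN: already held */
	pthread_mutex_unlock(&dev_lock);
	return 0;
}
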
diff --git a/drivers/usb/misc/usb-ljca.c b/drivers/usb/misc/usb-ljca.c
index c562630d862c..9e65bb9577ea 100644
--- a/drivers/usb/misc/usb-ljca.c
+++ b/drivers/usb/misc/usb-ljca.c
@@ -164,28 +164,39 @@ struct ljca_match_ids_walk_data {
struct acpi_device *adev;
};
+/*
+ * ACPI hardware IDs for LJCA client devices.
+ *
+ * [1] Some BIOS implementations use these IDs for denoting LJCA client devices
+ * even though the IDs have been allocated for USBIO. This isn't a problem
+ * as the usb-ljca driver is probed based on the USB device's vendor and
+ * product IDs and its client drivers are probed based on auxiliary device
+ * names, not these ACPI _HIDs. List of such systems:
+ *
+ * Dell Precision 5490
+ */
static const struct acpi_device_id ljca_gpio_hids[] = {
- { "INTC1074" },
- { "INTC1096" },
- { "INTC100B" },
- { "INTC10D1" },
- { "INTC10B5" },
+ { "INTC100B" }, /* RPL LJCA GPIO */
+ { "INTC1074" }, /* CVF LJCA GPIO */
+ { "INTC1096" }, /* ADL LJCA GPIO */
+ { "INTC10B5" }, /* LNL LJCA GPIO */
+ { "INTC10D1" }, /* MTL (CVF VSC) USBIO GPIO [1] */
{},
};
static const struct acpi_device_id ljca_i2c_hids[] = {
- { "INTC1075" },
- { "INTC1097" },
- { "INTC100C" },
- { "INTC10D2" },
+ { "INTC100C" }, /* RPL LJCA I2C */
+ { "INTC1075" }, /* CVF LJCA I2C */
+ { "INTC1097" }, /* ADL LJCA I2C */
+ { "INTC10D2" }, /* MTL (CVF VSC) USBIO I2C [1] */
{},
};
static const struct acpi_device_id ljca_spi_hids[] = {
- { "INTC1091" },
- { "INTC1098" },
- { "INTC100D" },
- { "INTC10D3" },
+ { "INTC100D" }, /* RPL LJCA SPI */
+ { "INTC1091" }, /* TGL/ADL LJCA SPI */
+ { "INTC1098" }, /* ADL LJCA SPI */
+ { "INTC10D3" }, /* MTL (CVF VSC) USBIO SPI [1] */
{},
};
@@ -891,7 +902,7 @@ static struct usb_driver ljca_driver = {
};
module_usb_driver(ljca_driver);
-MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Wentong Wu");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_AUTHOR("Lixu Zhang <lixu.zhang@intel.com>");
MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB driver");
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index c11840b9a6f1..ba5a63669e5f 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -65,7 +65,7 @@ struct mtu3_request;
#define MTU3_U3_IP_SLOT_DEFAULT 2
#define MTU3_U2_IP_SLOT_DEFAULT 1
-/**
+/*
* IP TRUNK version
* from 0x1003 version, USB3 Gen2 is supported, two changes affect driver:
* 1. MAXPKT and MULTI bits layout of TXCSR1 and RXCSR1 are adjusted,
@@ -74,9 +74,9 @@ struct mtu3_request;
*/
#define MTU3_TRUNK_VERS_1003 0x1003
-/**
+/*
* Normally the device works on HS or SS, to simplify fifo management,
- * devide fifo into some 512B parts, use bitmap to manage it; And
+ * divide fifo into some 512B parts, use bitmap to manage it; And
* 128 bits size of bitmap is large enough, that means it can manage
* up to 64KB fifo size.
* NOTE: MTU3_EP_FIFO_UNIT should be power of two
@@ -85,7 +85,7 @@ struct mtu3_request;
#define MTU3_FIFO_BIT_SIZE 128
#define MTU3_U2_IP_EP0_FIFO_SIZE 64
-/**
+/*
* Maximum size of ep0 response buffer for ch9 requests,
* the SET_SEL request uses 6 so far, and GET_STATUS is 2
*/
@@ -103,6 +103,7 @@ enum mtu3_speed {
};
/**
+ * enum mtu3_g_ep0_state - endpoint 0 states
* @MU3D_EP0_STATE_SETUP: waits for SETUP or received a SETUP
* without data stage.
* @MU3D_EP0_STATE_TX: IN data stage
@@ -121,11 +122,12 @@ enum mtu3_g_ep0_state {
};
/**
- * MTU3_DR_FORCE_NONE: automatically switch host and periperal mode
+ * enum mtu3_dr_force_mode - indicates host/OTG operating mode
+ * @MTU3_DR_FORCE_NONE: automatically switch host and peripheral mode
* by IDPIN signal.
- * MTU3_DR_FORCE_HOST: force to enter host mode and override OTG
+ * @MTU3_DR_FORCE_HOST: force to enter host mode and override OTG
* IDPIN signal.
- * MTU3_DR_FORCE_DEVICE: force to enter peripheral mode.
+ * @MTU3_DR_FORCE_DEVICE: force to enter peripheral mode.
*/
enum mtu3_dr_force_mode {
MTU3_DR_FORCE_NONE = 0,
@@ -134,6 +136,7 @@ enum mtu3_dr_force_mode {
};
/**
+ * struct mtu3_fifo_info - HW FIFO description and management data
* @base: the base address of fifo
* @limit: the bitmap size in bits
* @bitmap: fifo bitmap in unit of @MTU3_EP_FIFO_UNIT
@@ -145,7 +148,7 @@ struct mtu3_fifo_info {
};
/**
- * General Purpose Descriptor (GPD):
+ * struct qmu_gpd - General Purpose Descriptor (GPD):
* The format of TX GPD is a little different from RX one.
* And the size of GPD is 16 bytes.
*
@@ -179,11 +182,13 @@ struct qmu_gpd {
} __packed;
/**
-* dma: physical base address of GPD segment
-* start: virtual base address of GPD segment
-* end: the last GPD element
-* enqueue: the first empty GPD to use
-* dequeue: the first completed GPD serviced by ISR
+* struct mtu3_gpd_ring - GPD ring descriptor
+* @dma: physical base address of GPD segment
+* @start: virtual base address of GPD segment
+* @end: the last GPD element
+* @enqueue: the first empty GPD to use
+* @dequeue: the first completed GPD serviced by ISR
+*
* NOTE: the size of GPD ring should be >= 2
*/
struct mtu3_gpd_ring {
@@ -195,6 +200,7 @@ struct mtu3_gpd_ring {
};
/**
+* struct otg_switch_mtk - OTG/dual-role switch management
* @vbus: vbus 5V used by host mode
* @edev: external connector used to detect vbus and iddig changes
* @id_nb : notifier for iddig(idpin) detection
@@ -222,6 +228,7 @@ struct otg_switch_mtk {
};
/**
+ * struct ssusb_mtk - SuperSpeed USB descriptor (MTK)
* @mac_base: register base address of device MAC, exclude xHCI's
* @ippc_base: register base address of IP Power and Clock interface (IPPC)
* @vusb33: usb3.3V shared by device/host IP
@@ -268,6 +275,7 @@ struct ssusb_mtk {
};
/**
+ * struct mtu3_ep - common mtu3 endpoint description
* @fifo_size: it is (@slot + 1) * @fifo_seg_size
* @fifo_seg_size: it is roundup_pow_of_two(@maxp)
*/
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index a3a6282893d0..3a25ee18f144 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -290,7 +290,7 @@ static void mtu3_csr_init(struct mtu3 *mtu)
/* delay about 0.1us from detecting reset to send chirp-K */
mtu3_clrbits(mbase, U3D_LINK_RESET_INFO, WTCHRP_MSK);
- /* enable automatical HWRW from L1 */
+ /* enable automatic HWRW from L1 */
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, LPM_HRWE);
}
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 7b5a431acb56..cc8a864dbd63 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -431,7 +431,6 @@ static int mtu3_probe(struct platform_device *pdev)
}
device_enable_async_suspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
pm_runtime_forbid(dev);
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 3d77408e3133..03f26589b056 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -221,7 +221,7 @@ static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
return ring->dequeue;
}
-/* check if a ring is emtpy */
+/* check if a ring is empty */
static bool gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
struct qmu_gpd *enq = ring->enqueue;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c7234b236971..0acc62569ae5 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2031,7 +2031,6 @@ static void musb_pm_runtime_check_session(struct musb *musb)
if (!musb->session)
break;
trace_musb_state(musb, devctl, "Allow PM on possible host mode disconnect");
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
musb->session = false;
return;
@@ -2063,7 +2062,6 @@ static void musb_pm_runtime_check_session(struct musb *musb)
msecs_to_jiffies(3000));
} else {
trace_musb_state(musb, devctl, "Allow PM with no session");
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
@@ -2090,7 +2088,6 @@ static void musb_irq_work(struct work_struct *data)
sysfs_notify(&musb->controller->kobj, NULL, "mode");
}
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
@@ -2564,7 +2561,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_init_debugfs(musb);
musb->is_initialized = 1;
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return 0;
@@ -2887,7 +2883,6 @@ static int musb_resume(struct device *dev)
error);
spin_unlock_irqrestore(&musb->lock, flags);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 2d623284edf6..5092d62c2062 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -106,7 +106,6 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
}
}
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return 0;
}
@@ -119,7 +118,6 @@ static int musb_test_mode_show(struct seq_file *s, void *unused)
pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
if (test == (MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS))
@@ -216,7 +214,6 @@ static ssize_t musb_test_mode_write(struct file *file,
musb_writeb(musb->mregs, MUSB_TESTMODE, test);
ret:
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return count;
}
@@ -243,7 +240,6 @@ static int musb_softconnect_show(struct seq_file *s, void *unused)
reg = musb_readb(musb->mregs, MUSB_DEVCTL);
connect = reg & MUSB_DEVCTL_SESSION ? 1 : 0;
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
break;
default:
@@ -304,7 +300,6 @@ static ssize_t musb_softconnect_write(struct file *file,
}
}
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return count;
}
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index a08ce96c08d3..e3935f18dd56 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -296,7 +296,6 @@ static void otg_timer(struct timer_list *t)
if (err < 0)
dev_err(dev, "%s resume work: %i\n", __func__, err);
spin_unlock_irqrestore(&musb->lock, flags);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index caf4d4cd4b75..d666c4292753 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1258,7 +1258,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
unlock:
spin_unlock_irqrestore(&musb->lock, lockflags);
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return status;
@@ -1642,7 +1641,6 @@ static void musb_gadget_work(struct work_struct *work)
spin_lock_irqsave(&musb->lock, flags);
musb_pullup(musb, musb->softconnect);
spin_unlock_irqrestore(&musb->lock, flags);
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
@@ -1862,7 +1860,6 @@ static int musb_gadget_start(struct usb_gadget *g,
if (musb->xceiv && musb->xceiv->last_event == USB_EVENT_ID)
musb_platform_set_vbus(musb, 1);
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return 0;
@@ -1916,7 +1913,6 @@ static int musb_gadget_stop(struct usb_gadget *g)
usb_gadget_set_state(g, USB_STATE_NOTATTACHED);
/* Force check of devctl register for PM runtime */
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
return 0;
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index c35c07b7488c..48bb9bfb2204 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -151,7 +151,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
default:
dev_dbg(musb->controller, "ID float\n");
}
- pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
atomic_notifier_call_chain(&musb->xceiv->notifier,
musb->xceiv->last_event, NULL);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 8f536f2c500f..8df68261a320 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -827,10 +827,26 @@ static void usbhs_remove(struct platform_device *pdev)
usbhs_pipe_remove(priv);
}
+static void usbhsc_restore(struct device *dev)
+{
+ struct usbhs_priv *priv = dev_get_drvdata(dev);
+ struct platform_device *pdev = usbhs_priv_to_pdev(priv);
+
+ if (!usbhs_get_dparam(priv, runtime_pwctrl)) {
+ usbhsc_power_ctrl(priv, 1);
+ usbhs_mod_autonomy_mode(priv);
+ }
+
+ usbhs_platform_call(priv, phy_reset, pdev);
+
+ usbhsc_schedule_notify_hotplug(pdev);
+}
+
static int usbhsc_suspend(struct device *dev)
{
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
+ int ret;
if (mod) {
usbhs_mod_call(priv, stop, priv);
@@ -840,22 +856,23 @@ static int usbhsc_suspend(struct device *dev)
if (mod || !usbhs_get_dparam(priv, runtime_pwctrl))
usbhsc_power_ctrl(priv, 0);
- return 0;
+ ret = reset_control_assert(priv->rsts);
+ if (ret)
+ usbhsc_restore(dev);
+
+ return ret;
}
static int usbhsc_resume(struct device *dev)
{
struct usbhs_priv *priv = dev_get_drvdata(dev);
- struct platform_device *pdev = usbhs_priv_to_pdev(priv);
-
- if (!usbhs_get_dparam(priv, runtime_pwctrl)) {
- usbhsc_power_ctrl(priv, 1);
- usbhs_mod_autonomy_mode(priv);
- }
+ int ret;
- usbhs_platform_call(priv, phy_reset, pdev);
+ ret = reset_control_deassert(priv->rsts);
+ if (ret)
+ return ret;
- usbhsc_schedule_notify_hotplug(pdev);
+ usbhsc_restore(dev);
return 0;
}
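
Suspend now asserts the reset line and, if that fails, immediately replays the power-up/PHY-reset/hotplug sequence shared with resume via usbhsc_restore(), so the controller is never left half-suspended. A standalone sketch of that error-handling shape; the reset_* helpers and restore_controller() below are stand-ins, not driver calls:

#include <stdio.h>

static int reset_assert(void)	{ return 0; }	/* stand-in, 0 on success */
static int reset_deassert(void)	{ return 0; }	/* stand-in, 0 on success */

static void restore_controller(void)
{
	/* power the block back up, reset the PHY, schedule a hotplug notify */
	printf("restore controller state\n");
}

static int do_suspend(void)
{
	int ret = reset_assert();

	if (ret)		/* could not hold the block in reset: undo */
		restore_controller();
	return ret;
}

static int do_resume(void)
{
	int ret = reset_deassert();

	if (ret)
		return ret;
	restore_controller();
	return 0;
}

int main(void)
{
	printf("suspend: %d\n", do_suspend());
	printf("resume:  %d\n", do_resume());
	return 0;
}
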
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 62e984d20e59..5de856f65f0d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM05CN 0x0312
#define QUECTEL_PRODUCT_EM05G_GR 0x0313
#define QUECTEL_PRODUCT_EM05G_RS 0x0314
+#define QUECTEL_PRODUCT_RG255C 0x0316
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_RM520N 0x0801
@@ -617,6 +618,7 @@ static void option_instat_callback(struct urb *urb);
#define UNISOC_VENDOR_ID 0x1782
/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
#define TOZED_PRODUCT_LT70C 0x4055
+#define UNISOC_PRODUCT_UIS7720 0x4064
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
@@ -1270,6 +1272,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x40) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1398,10 +1403,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(4) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a3, 0xff), /* Telit FN920C04 (ECM) */
+ .driver_info = NCTRL(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(4) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a8, 0xff), /* Telit FN920C04 (ECM) */
+ .driver_info = NCTRL(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
@@ -2466,6 +2475,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, UNISOC_PRODUCT_UIS7720, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */
.driver_info = NCTRL(1) },
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 9033e505db7f..0cff54ad90fa 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -139,8 +139,7 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
return cnt;
while (sg_miter_next(&miter) && cnt < buflen) {
- unsigned int len = min_t(unsigned int, miter.length,
- buflen - cnt);
+ unsigned int len = min(miter.length, buflen - cnt);
if (dir == FROM_XFER_BUF)
memcpy(buffer + cnt, miter.addr, len);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 4ed0dc19afe0..8f24b81d6c12 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -309,18 +309,18 @@ static void uas_stat_cmplt(struct urb *urb)
int status = urb->status;
bool success;
- spin_lock_irqsave(&devinfo->lock, flags);
-
- if (devinfo->resetting)
- goto out;
-
if (status) {
if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
dev_err(&urb->dev->dev, "stat urb: status %d\n", status);
- goto out;
+ goto bail;
}
idx = be16_to_cpup(&iu->tag) - 1;
+
+ spin_lock_irqsave(&devinfo->lock, flags);
+
+ if (devinfo->resetting)
+ goto out;
if (idx >= MAX_CMNDS || !devinfo->cmnd[idx]) {
dev_err(&urb->dev->dev,
"stat urb: no pending cmd for uas-tag %d\n", idx + 1);
@@ -375,9 +375,8 @@ static void uas_stat_cmplt(struct urb *urb)
default:
uas_log_cmd_state(cmnd, "bogus IU", iu->iu_id);
}
-out:
- usb_free_urb(urb);
spin_unlock_irqrestore(&devinfo->lock, flags);
+ usb_free_urb(urb);
/* Unlinking of data urbs must be done without holding the lock */
if (data_in_urb) {
@@ -388,6 +387,12 @@ out:
usb_unlink_urb(data_out_urb);
usb_put_urb(data_out_urb);
}
+ return;
+
+out:
+ spin_unlock_irqrestore(&devinfo->lock, flags);
+bail:
+ usb_free_urb(urb);
}
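
The reordered completion handler takes devinfo->lock only once the status is known to be usable and drops it before usb_free_urb(), shrinking the critical section. A standalone sketch of that ordering, with a pthread spinlock and a malloc'd buffer standing in for the lock and the URB:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_spinlock_t dev_lock;

/* Update the shared bookkeeping under the lock, but release the buffer
 * (the URB stand-in) only after the lock has been dropped.
 */
static void complete(void *urb)
{
	pthread_spin_lock(&dev_lock);
	/* ... look up and complete the pending command ... */
	pthread_spin_unlock(&dev_lock);
	free(urb);
}

int main(void)
{
	pthread_spin_init(&dev_lock, PTHREAD_PROCESS_PRIVATE);
	complete(malloc(64));
	pthread_spin_destroy(&dev_lock);
	puts("done");
	return 0;
}
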
static void uas_data_cmplt(struct urb *urb)
@@ -429,8 +434,8 @@ static void uas_data_cmplt(struct urb *urb)
}
uas_try_complete(cmnd, __func__);
out:
- usb_free_urb(urb);
spin_unlock_irqrestore(&devinfo->lock, flags);
+ usb_free_urb(urb);
}
static void uas_cmd_cmplt(struct urb *urb)
@@ -1265,7 +1270,7 @@ static int __init uas_init(void)
{
int rv;
- workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0);
+ workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!workqueue)
return -ENOMEM;
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 1477e31d7763..b695f5ba9a40 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -98,7 +98,7 @@ UNUSUAL_DEV(0x125f, 0xa94a, 0x0160, 0x0160,
US_FL_NO_ATA_1X),
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
-UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
+UNUSUAL_DEV(0x13fd, 0x3940, 0x0309, 0x0309,
"Initio Corporation",
"INIC-3069",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 1dcb77faf85d..8d111ad3b71b 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -758,7 +758,9 @@ int dp_altmode_probe(struct typec_altmode *alt)
struct fwnode_handle *fwnode;
struct dp_altmode *dp;
- /* FIXME: Port can only be DFP_U. */
+ /* Port can only be DFP_U. */
+ if (typec_altmode_get_data_role(alt) != TYPEC_HOST)
+ return -EPROTO;
/* Make sure we have compatible pin configurations */
if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
index 0ae0a5ee3fae..2e8ae1d2faf9 100644
--- a/drivers/usb/typec/anx7411.c
+++ b/drivers/usb/typec/anx7411.c
@@ -1516,8 +1516,7 @@ static int anx7411_i2c_probe(struct i2c_client *client)
INIT_WORK(&plat->work, anx7411_work_func);
plat->workqueue = alloc_workqueue("anx7411_work",
- WQ_FREEZABLE |
- WQ_MEM_RECLAIM,
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
1);
if (!plat->workqueue) {
dev_err(dev, "fail to create work queue\n");
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 67a533e35150..9b2647cb199b 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -2121,6 +2121,19 @@ void typec_set_data_role(struct typec_port *port, enum typec_data_role role)
EXPORT_SYMBOL_GPL(typec_set_data_role);
/**
+ * typec_get_data_role - Get port data role
+ * @port: The USB Type-C Port to query
+ *
+ * This routine is used by the altmode drivers to determine if the port is the
+ * DFP before issuing Enter Mode.
+ */
+enum typec_data_role typec_get_data_role(struct typec_port *port)
+{
+ return port->data_role;
+}
+EXPORT_SYMBOL_GPL(typec_get_data_role);
+
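
Together with the data-role check added to dp_altmode_probe() above, this lets mode drivers refuse to bind unless the port is currently the host (DFP) side. A standalone sketch of that gate; the enum is illustrative and only the shape of the check mirrors the driver:

#include <errno.h>
#include <stdio.h>

enum data_role { ROLE_DEVICE, ROLE_HOST };

/* Only the host (DFP) side may drive Enter Mode, so anything else bails. */
static int altmode_probe(enum data_role role)
{
	if (role != ROLE_HOST)
		return -EPROTO;
	return 0;
}

int main(void)
{
	printf("%d\n", altmode_probe(ROLE_HOST));	/* 0 */
	printf("%d\n", altmode_probe(ROLE_DEVICE));	/* -EPROTO */
	return 0;
}
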
+/**
* typec_set_pwr_role - Report power role change
* @port: The USB Type-C Port where the role was changed
* @role: The new data role
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
index 3ecc688dda82..3876f4faead6 100644
--- a/drivers/usb/typec/hd3ss3220.c
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -15,6 +15,9 @@
#include <linux/usb/typec.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_graph.h>
#define HD3SS3220_REG_CN_STAT 0x08
#define HD3SS3220_REG_CN_STAT_CTRL 0x09
@@ -54,6 +57,11 @@ struct hd3ss3220 {
struct delayed_work output_poll_work;
enum usb_role role_state;
bool poll;
+
+ struct gpio_desc *id_gpiod;
+ int id_irq;
+
+ struct regulator *vbus;
};
static int hd3ss3220_set_power_opmode(struct hd3ss3220 *hd3ss3220, int power_opmode)
@@ -319,13 +327,33 @@ static const struct regmap_config config = {
.max_register = 0x0A,
};
+static irqreturn_t hd3ss3220_id_isr(int irq, void *dev_id)
+{
+ struct hd3ss3220 *hd3ss3220 = dev_id;
+ int ret;
+ int id;
+
+ id = gpiod_get_value_cansleep(hd3ss3220->id_gpiod);
+ if (!id)
+ ret = regulator_enable(hd3ss3220->vbus);
+ else
+ ret = regulator_disable(hd3ss3220->vbus);
+
+ if (ret)
+ dev_err(hd3ss3220->dev,
+ "vbus regulator %s failed: %d\n", id ? "disable" : "enable", ret);
+
+ return IRQ_HANDLED;
+}
+
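
The new ID interrupt handler turns the board's VBUS supply on when the ID pin reads low (a host-role cable attached) and back off when it goes high. A standalone sketch of that decision, with trivial stand-ins for the GPIO read and the regulator calls:

#include <stdio.h>

static int read_id_pin(void)	{ return 0; }	/* stand-in: 0 = host cable */
static int vbus_enable(void)	{ return 0; }	/* stand-in regulator call */
static int vbus_disable(void)	{ return 0; }	/* stand-in regulator call */

/* ID low: act as host and source VBUS; ID high: stop sourcing it. */
static void id_pin_event(void)
{
	int id = read_id_pin();
	int ret = id ? vbus_disable() : vbus_enable();

	if (ret)
		fprintf(stderr, "vbus regulator %s failed: %d\n",
			id ? "disable" : "enable", ret);
}

int main(void)
{
	id_pin_event();
	puts("handled");
	return 0;
}
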
static int hd3ss3220_probe(struct i2c_client *client)
{
struct typec_capability typec_cap = { };
- struct hd3ss3220 *hd3ss3220;
struct fwnode_handle *connector, *ep;
- int ret;
+ struct hd3ss3220 *hd3ss3220;
+ struct regulator *vbus;
unsigned int data;
+ int ret;
hd3ss3220 = devm_kzalloc(&client->dev, sizeof(struct hd3ss3220),
GFP_KERNEL);
@@ -359,6 +387,49 @@ static int hd3ss3220_probe(struct i2c_client *client)
goto err_put_fwnode;
}
+ vbus = devm_of_regulator_get_optional(hd3ss3220->dev,
+ to_of_node(connector),
+ "vbus");
+ if (IS_ERR(vbus) && vbus != ERR_PTR(-ENODEV)) {
+ ret = PTR_ERR(vbus);
+ dev_err(hd3ss3220->dev, "failed to get vbus: %d", ret);
+ goto err_put_fwnode;
+ }
+
+ hd3ss3220->vbus = (vbus == ERR_PTR(-ENODEV) ? NULL : vbus);
+
+ if (hd3ss3220->vbus) {
+ hd3ss3220->id_gpiod = devm_gpiod_get_optional(hd3ss3220->dev,
+ "id",
+ GPIOD_IN);
+ if (IS_ERR(hd3ss3220->id_gpiod)) {
+ ret = PTR_ERR(hd3ss3220->id_gpiod);
+ goto err_put_fwnode;
+ }
+ }
+
+ if (hd3ss3220->id_gpiod) {
+ hd3ss3220->id_irq = gpiod_to_irq(hd3ss3220->id_gpiod);
+ if (hd3ss3220->id_irq < 0) {
+ ret = hd3ss3220->id_irq;
+ dev_err(hd3ss3220->dev,
+ "failed to get ID gpio: %d\n",
+ hd3ss3220->id_irq);
+ goto err_put_fwnode;
+ }
+
+ ret = devm_request_threaded_irq(hd3ss3220->dev,
+ hd3ss3220->id_irq, NULL,
+ hd3ss3220_id_isr,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(hd3ss3220->dev), hd3ss3220);
+ if (ret < 0) {
+ dev_err(hd3ss3220->dev, "failed to get ID irq: %d\n", ret);
+ goto err_put_fwnode;
+ }
+ }
+
typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
typec_cap.driver_data = hd3ss3220;
typec_cap.type = TYPEC_PORT_DRP;
diff --git a/drivers/usb/typec/mux/ps883x.c b/drivers/usb/typec/mux/ps883x.c
index ad59babf7cce..5f2879749769 100644
--- a/drivers/usb/typec/mux/ps883x.c
+++ b/drivers/usb/typec/mux/ps883x.c
@@ -14,15 +14,18 @@
#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/usb/pd.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#include <linux/usb/typec_retimer.h>
+#include <linux/usb/typec_tbt.h>
#define REG_USB_PORT_CONN_STATUS_0 0x00
#define CONN_STATUS_0_CONNECTION_PRESENT BIT(0)
#define CONN_STATUS_0_ORIENTATION_REVERSED BIT(1)
+#define CONN_STATUS_0_ACTIVE_CABLE BIT(2)
#define CONN_STATUS_0_USB_3_1_CONNECTED BIT(5)
#define REG_USB_PORT_CONN_STATUS_1 0x01
@@ -34,6 +37,10 @@
#define REG_USB_PORT_CONN_STATUS_2 0x02
+#define CONN_STATUS_2_TBT_CONNECTED BIT(0)
+#define CONN_STATUS_2_TBT_UNIDIR_LSRX_ACT_LT BIT(4)
+#define CONN_STATUS_2_USB4_CONNECTED BIT(7)
+
struct ps883x_retimer {
struct i2c_client *client;
struct gpio_desc *reset_gpio;
@@ -54,8 +61,9 @@ struct ps883x_retimer {
struct mutex lock; /* protect non-concurrent retimer & switch */
enum typec_orientation orientation;
- unsigned long mode;
- unsigned int svid;
+ u8 cfg0;
+ u8 cfg1;
+ u8 cfg2;
};
static int ps883x_configure(struct ps883x_retimer *retimer, int cfg0,
@@ -64,6 +72,9 @@ static int ps883x_configure(struct ps883x_retimer *retimer, int cfg0,
struct device *dev = &retimer->client->dev;
int ret;
+ if (retimer->cfg0 == cfg0 && retimer->cfg1 == cfg1 && retimer->cfg2 == cfg2)
+ return 0;
+
ret = regmap_write(retimer->regmap, REG_USB_PORT_CONN_STATUS_0, cfg0);
if (ret) {
dev_err(dev, "failed to write conn_status_0: %d\n", ret);
@@ -82,53 +93,82 @@ static int ps883x_configure(struct ps883x_retimer *retimer, int cfg0,
return ret;
}
+ retimer->cfg0 = cfg0;
+ retimer->cfg1 = cfg1;
+ retimer->cfg2 = cfg2;
+
return 0;
}
-static int ps883x_set(struct ps883x_retimer *retimer)
+static int ps883x_set(struct ps883x_retimer *retimer, struct typec_retimer_state *state)
{
+ struct typec_thunderbolt_data *tb_data;
+ const struct enter_usb_data *eudo_data;
int cfg0 = CONN_STATUS_0_CONNECTION_PRESENT;
int cfg1 = 0x00;
int cfg2 = 0x00;
- if (retimer->orientation == TYPEC_ORIENTATION_NONE ||
- retimer->mode == TYPEC_STATE_SAFE) {
- return ps883x_configure(retimer, cfg0, cfg1, cfg2);
- }
-
- if (retimer->mode != TYPEC_STATE_USB && retimer->svid != USB_TYPEC_DP_SID)
- return -EINVAL;
-
if (retimer->orientation == TYPEC_ORIENTATION_REVERSE)
cfg0 |= CONN_STATUS_0_ORIENTATION_REVERSED;
- switch (retimer->mode) {
- case TYPEC_STATE_USB:
- cfg0 |= CONN_STATUS_0_USB_3_1_CONNECTED;
- break;
-
- case TYPEC_DP_STATE_C:
- cfg1 = CONN_STATUS_1_DP_CONNECTED |
- CONN_STATUS_1_DP_SINK_REQUESTED |
- CONN_STATUS_1_DP_PIN_ASSIGNMENT_C_D |
- CONN_STATUS_1_DP_HPD_LEVEL;
- break;
-
- case TYPEC_DP_STATE_D:
- cfg0 |= CONN_STATUS_0_USB_3_1_CONNECTED;
- cfg1 = CONN_STATUS_1_DP_CONNECTED |
- CONN_STATUS_1_DP_SINK_REQUESTED |
- CONN_STATUS_1_DP_PIN_ASSIGNMENT_C_D |
- CONN_STATUS_1_DP_HPD_LEVEL;
- break;
-
- case TYPEC_DP_STATE_E:
- cfg1 = CONN_STATUS_1_DP_CONNECTED |
- CONN_STATUS_1_DP_HPD_LEVEL;
- break;
-
- default:
- return -EOPNOTSUPP;
+ if (state->alt) {
+ switch (state->alt->svid) {
+ case USB_TYPEC_DP_SID:
+ cfg1 |= CONN_STATUS_1_DP_CONNECTED |
+ CONN_STATUS_1_DP_HPD_LEVEL;
+
+ switch (state->mode) {
+ case TYPEC_DP_STATE_C:
+ cfg1 |= CONN_STATUS_1_DP_SINK_REQUESTED |
+ CONN_STATUS_1_DP_PIN_ASSIGNMENT_C_D;
+ fallthrough;
+ case TYPEC_DP_STATE_D:
+ cfg0 |= CONN_STATUS_0_USB_3_1_CONNECTED;
+ break;
+ default: /* MODE_E */
+ break;
+ }
+ break;
+ case USB_TYPEC_TBT_SID:
+ tb_data = state->data;
+
+ /* Unconditional */
+ cfg2 |= CONN_STATUS_2_TBT_CONNECTED;
+
+ if (tb_data->cable_mode & TBT_CABLE_ACTIVE_PASSIVE)
+ cfg0 |= CONN_STATUS_0_ACTIVE_CABLE;
+
+ if (tb_data->enter_vdo & TBT_ENTER_MODE_UNI_DIR_LSRX)
+ cfg2 |= CONN_STATUS_2_TBT_UNIDIR_LSRX_ACT_LT;
+ break;
+ default:
+ dev_err(&retimer->client->dev, "Got unsupported SID: 0x%x\n",
+ state->alt->svid);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ switch (state->mode) {
+ case TYPEC_STATE_SAFE:
+ /* USB2 pins don't even go through this chip */
+ case TYPEC_MODE_USB2:
+ break;
+ case TYPEC_STATE_USB:
+ case TYPEC_MODE_USB3:
+ cfg0 |= CONN_STATUS_0_USB_3_1_CONNECTED;
+ break;
+ case TYPEC_MODE_USB4:
+ eudo_data = state->data;
+
+ cfg2 |= CONN_STATUS_2_USB4_CONNECTED;
+
+ if (FIELD_GET(EUDO_CABLE_TYPE_MASK, eudo_data->eudo) != EUDO_CABLE_TYPE_PASSIVE)
+ cfg0 |= CONN_STATUS_0_ACTIVE_CABLE;
+ break;
+ default:
+ dev_err(&retimer->client->dev, "Got unsupported mode: %lu\n",
+ state->mode);
+ return -EOPNOTSUPP;
+ }
}
return ps883x_configure(retimer, cfg0, cfg1, cfg2);
@@ -149,7 +189,11 @@ static int ps883x_sw_set(struct typec_switch_dev *sw,
if (retimer->orientation != orientation) {
retimer->orientation = orientation;
- ret = ps883x_set(retimer);
+ ret = regmap_assign_bits(retimer->regmap, REG_USB_PORT_CONN_STATUS_0,
+ CONN_STATUS_0_ORIENTATION_REVERSED,
+ orientation == TYPEC_ORIENTATION_REVERSE);
+ if (ret)
+ dev_err(&retimer->client->dev, "failed to set orientation: %d\n", ret);
}
mutex_unlock(&retimer->lock);
@@ -165,18 +209,7 @@ static int ps883x_retimer_set(struct typec_retimer *rtmr,
int ret = 0;
mutex_lock(&retimer->lock);
-
- if (state->mode != retimer->mode) {
- retimer->mode = state->mode;
-
- if (state->alt)
- retimer->svid = state->alt->svid;
- else
- retimer->svid = 0;
-
- ret = ps883x_set(retimer);
- }
-
+ ret = ps883x_set(retimer, state);
mutex_unlock(&retimer->lock);
if (ret)
diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
index d78c04a421bc..67f20b5ffdf4 100644
--- a/drivers/usb/typec/pd.c
+++ b/drivers/usb/typec/pd.c
@@ -360,6 +360,84 @@ static const struct device_type sink_pps_type = {
};
/* -------------------------------------------------------------------------- */
+/* Standard Power Range (SPR) Adjustable Voltage Supply (AVS) */
+
+static ssize_t
+spr_avs_9v_to_15v_max_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umA\n",
+ pdo_spr_avs_apdo_9v_to_15v_max_current_ma(to_pdo(dev)->pdo));
+}
+
+static ssize_t
+spr_avs_15v_to_20v_max_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%umA\n",
+ pdo_spr_avs_apdo_15v_to_20v_max_current_ma(to_pdo(dev)->pdo));
+}
+
+static ssize_t
+spr_avs_src_peak_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n",
+ pdo_spr_avs_apdo_src_peak_current(to_pdo(dev)->pdo));
+}
+
+static struct device_attribute spr_avs_9v_to_15v_max_current_attr = {
+ .attr = {
+ .name = "maximum_current_9V_to_15V",
+ .mode = 0444,
+ },
+ .show = spr_avs_9v_to_15v_max_current_show,
+};
+
+static struct device_attribute spr_avs_15v_to_20v_max_current_attr = {
+ .attr = {
+ .name = "maximum_current_15V_to_20V",
+ .mode = 0444,
+ },
+ .show = spr_avs_15v_to_20v_max_current_show,
+};
+
+static struct device_attribute spr_avs_src_peak_current_attr = {
+ .attr = {
+ .name = "peak_current",
+ .mode = 0444,
+ },
+ .show = spr_avs_src_peak_current_show,
+};
+
+static struct attribute *source_spr_avs_attrs[] = {
+ &spr_avs_9v_to_15v_max_current_attr.attr,
+ &spr_avs_15v_to_20v_max_current_attr.attr,
+ &spr_avs_src_peak_current_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(source_spr_avs);
+
+static const struct device_type source_spr_avs_type = {
+ .name = "pdo",
+ .release = pdo_release,
+ .groups = source_spr_avs_groups,
+};
+
+static struct attribute *sink_spr_avs_attrs[] = {
+ &spr_avs_9v_to_15v_max_current_attr.attr,
+ &spr_avs_15v_to_20v_max_current_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(sink_spr_avs);
+
+static const struct device_type sink_spr_avs_type = {
+ .name = "pdo",
+ .release = pdo_release,
+ .groups = sink_spr_avs_groups,
+};
+
+/* -------------------------------------------------------------------------- */
static const char * const supply_name[] = {
[PDO_TYPE_FIXED] = "fixed_supply",
@@ -368,7 +446,8 @@ static const char * const supply_name[] = {
};
static const char * const apdo_supply_name[] = {
- [APDO_TYPE_PPS] = "programmable_supply",
+ [APDO_TYPE_PPS] = "programmable_supply",
+ [APDO_TYPE_SPR_AVS] = "spr_adjustable_voltage_supply",
};
static const struct device_type *source_type[] = {
@@ -378,7 +457,8 @@ static const struct device_type *source_type[] = {
};
static const struct device_type *source_apdo_type[] = {
- [APDO_TYPE_PPS] = &source_pps_type,
+ [APDO_TYPE_PPS] = &source_pps_type,
+ [APDO_TYPE_SPR_AVS] = &source_spr_avs_type,
};
static const struct device_type *sink_type[] = {
@@ -388,7 +468,8 @@ static const struct device_type *sink_type[] = {
};
static const struct device_type *sink_apdo_type[] = {
- [APDO_TYPE_PPS] = &sink_pps_type,
+ [APDO_TYPE_PPS] = &sink_pps_type,
+ [APDO_TYPE_SPR_AVS] = &sink_spr_avs_type,
};
/* REVISIT: Export when EPR_*_Capabilities need to be supported. */
@@ -407,8 +488,12 @@ static int add_pdo(struct usb_power_delivery_capabilities *cap, u32 pdo, int pos
p->object_position = position;
if (pdo_type(pdo) == PDO_TYPE_APDO) {
- /* FIXME: Only PPS supported for now! Skipping others. */
- if (pdo_apdo_type(pdo) > APDO_TYPE_PPS) {
+ /*
+ * FIXME: Only PPS, SPR_AVS supported for now!
+ * Skipping others.
+ */
+ if (pdo_apdo_type(pdo) != APDO_TYPE_PPS &&
+ pdo_apdo_type(pdo) != APDO_TYPE_SPR_AVS) {
dev_warn(&cap->dev, "Unknown APDO type. PDO 0x%08x\n", pdo);
kfree(p);
return 0;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index b2a568a5bc9b..4ca2746ce16b 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -823,10 +823,23 @@ static void tcpm_log_source_caps(struct tcpm_port *port)
case PDO_TYPE_APDO:
if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
scnprintf(msg, sizeof(msg),
- "%u-%u mV, %u mA",
+ "PPS %u-%u mV, %u mA",
pdo_pps_apdo_min_voltage(pdo),
pdo_pps_apdo_max_voltage(pdo),
pdo_pps_apdo_max_current(pdo));
+ else if (pdo_apdo_type(pdo) == APDO_TYPE_EPR_AVS)
+ scnprintf(msg, sizeof(msg),
+ "EPR AVS %u-%u mV %u W peak_current: %u",
+ pdo_epr_avs_apdo_min_voltage_mv(pdo),
+ pdo_epr_avs_apdo_max_voltage_mv(pdo),
+ pdo_epr_avs_apdo_pdp_w(pdo),
+ pdo_epr_avs_apdo_src_peak_current(pdo));
+ else if (pdo_apdo_type(pdo) == APDO_TYPE_SPR_AVS)
+ scnprintf(msg, sizeof(msg),
+ "SPR AVS 9-15 V: %u mA 15-20 V: %u mA peak_current: %u",
+ pdo_spr_avs_apdo_9v_to_15v_max_current_ma(pdo),
+ pdo_spr_avs_apdo_15v_to_20v_max_current_ma(pdo),
+ pdo_spr_avs_apdo_src_peak_current(pdo));
else
strcpy(msg, "undefined APDO");
break;
@@ -7876,9 +7889,9 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->partner_desc.identity = &port->partner_ident;
- port->role_sw = usb_role_switch_get(port->dev);
+ port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
if (!port->role_sw)
- port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
+ port->role_sw = usb_role_switch_get(port->dev);
if (IS_ERR(port->role_sw)) {
err = PTR_ERR(port->role_sw);
goto out_destroy_wq;
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 2b1049c9a6f3..e2b26af2b84a 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -577,30 +577,36 @@ static bool cd321x_read_data_status(struct tps6598x *tps)
int ret;
ret = tps6598x_read_data_status(tps);
- if (ret < 0)
+ if (!ret)
return false;
if (tps->data_status & TPS_DATA_STATUS_DP_CONNECTION) {
ret = tps6598x_block_read(tps, TPS_REG_DP_SID_STATUS,
&cd321x->dp_sid_status, sizeof(cd321x->dp_sid_status));
- if (ret)
+ if (ret) {
dev_err(tps->dev, "Failed to read DP SID Status: %d\n",
ret);
+ return false;
+ }
}
if (tps->data_status & TPS_DATA_STATUS_TBT_CONNECTION) {
ret = tps6598x_block_read(tps, TPS_REG_INTEL_VID_STATUS,
&cd321x->intel_vid_status, sizeof(cd321x->intel_vid_status));
- if (ret)
+ if (ret) {
dev_err(tps->dev, "Failed to read Intel VID Status: %d\n", ret);
+ return false;
+ }
}
if (tps->data_status & CD321X_DATA_STATUS_USB4_CONNECTION) {
ret = tps6598x_block_read(tps, TPS_REG_USB4_STATUS,
&cd321x->usb4_status, sizeof(cd321x->usb4_status));
- if (ret)
+ if (ret) {
dev_err(tps->dev,
"Failed to read USB4 Status: %d\n", ret);
+ return false;
+ }
}
return true;
@@ -1695,6 +1701,7 @@ tps25750_register_port(struct tps6598x *tps, struct fwnode_handle *fwnode)
typec_cap.data = ret;
typec_cap.revision = USB_TYPEC_REV_1_3;
typec_cap.pd_revision = 0x300;
+ typec_cap.orientation_aware = true;
typec_cap.driver_data = tps;
typec_cap.ops = &tps6598x_ops;
typec_cap.fwnode = fwnode;
diff --git a/drivers/usb/typec/ucsi/cros_ec_ucsi.c b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
index eed2a7d0ebc6..d753f2188e25 100644
--- a/drivers/usb/typec/ucsi/cros_ec_ucsi.c
+++ b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
@@ -105,13 +105,12 @@ static int cros_ucsi_async_control(struct ucsi *ucsi, u64 cmd)
return 0;
}
-static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd, u32 *cci,
- void *data, size_t size)
+static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd, u32 *cci)
{
struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_sync_control_common(ucsi, cmd, cci, data, size);
+ ret = ucsi_sync_control_common(ucsi, cmd, cci);
switch (ret) {
case -EBUSY:
/* EC may return -EBUSY if CCI.busy is set.
diff --git a/drivers/usb/typec/ucsi/debugfs.c b/drivers/usb/typec/ucsi/debugfs.c
index f73f2b54554e..174f4d53b777 100644
--- a/drivers/usb/typec/ucsi/debugfs.c
+++ b/drivers/usb/typec/ucsi/debugfs.c
@@ -35,8 +35,11 @@ static int ucsi_cmd(void *data, u64 val)
case UCSI_SET_SINK_PATH:
case UCSI_SET_NEW_CAM:
case UCSI_SET_USB:
+ case UCSI_SET_POWER_LEVEL:
case UCSI_READ_POWER_LEVEL:
- ret = ucsi_send_command(ucsi, val, NULL, 0);
+ case UCSI_SET_PDOS:
+ ucsi->message_in_size = 0;
+ ret = ucsi_send_command(ucsi, val);
break;
case UCSI_GET_CAPABILITY:
case UCSI_GET_CONNECTOR_CAPABILITY:
@@ -51,9 +54,9 @@ static int ucsi_cmd(void *data, u64 val)
case UCSI_GET_ATTENTION_VDO:
case UCSI_GET_CAM_CS:
case UCSI_GET_LPM_PPM_INFO:
- ret = ucsi_send_command(ucsi, val,
- &ucsi->debugfs->response,
- sizeof(ucsi->debugfs->response));
+ ucsi->message_in_size = sizeof(ucsi->debugfs->response);
+ ret = ucsi_send_command(ucsi, val);
+ memcpy(&ucsi->debugfs->response, ucsi->message_in, sizeof(ucsi->debugfs->response));
break;
default:
ret = -EOPNOTSUPP;
@@ -108,6 +111,30 @@ static int ucsi_vbus_volt_show(struct seq_file *m, void *v)
}
DEFINE_SHOW_ATTRIBUTE(ucsi_vbus_volt);
+static ssize_t ucsi_message_out_write(struct file *file,
+ const char __user *data, size_t count, loff_t *ppos)
+{
+ struct ucsi *ucsi = file->private_data;
+ int ret;
+
+ char *buf __free(kfree) = memdup_user_nul(data, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ucsi->message_out_size = min(count / 2, UCSI_MAX_MESSAGE_OUT_LENGTH);
+ ret = hex2bin(ucsi->message_out, buf, ucsi->message_out_size);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations ucsi_message_out_fops = {
+ .open = simple_open,
+ .write = ucsi_message_out_write,
+ .llseek = generic_file_llseek,
+};
+
void ucsi_debugfs_register(struct ucsi *ucsi)
{
ucsi->debugfs = kzalloc(sizeof(*ucsi->debugfs), GFP_KERNEL);
@@ -120,6 +147,8 @@ void ucsi_debugfs_register(struct ucsi *ucsi)
debugfs_create_file("peak_current", 0400, ucsi->debugfs->dentry, ucsi, &ucsi_peak_curr_fops);
debugfs_create_file("avg_current", 0400, ucsi->debugfs->dentry, ucsi, &ucsi_avg_curr_fops);
debugfs_create_file("vbus_voltage", 0400, ucsi->debugfs->dentry, ucsi, &ucsi_vbus_volt_fops);
+ debugfs_create_file("message_out", 0200, ucsi->debugfs->dentry, ucsi,
+ &ucsi_message_out_fops);
}
void ucsi_debugfs_unregister(struct ucsi *ucsi)
diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c
index 8aae80b457d7..a09b4900ec76 100644
--- a/drivers/usb/typec/ucsi/displayport.c
+++ b/drivers/usb/typec/ucsi/displayport.c
@@ -67,11 +67,14 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo)
}
command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(dp->con->num);
- ret = ucsi_send_command(ucsi, command, &cur, sizeof(cur));
+ ucsi->message_in_size = sizeof(cur);
+ ret = ucsi_send_command(ucsi, command);
if (ret < 0) {
if (ucsi->version > 0x0100)
goto err_unlock;
cur = 0xff;
+ } else {
+ memcpy(&cur, ucsi->message_in, ucsi->message_in_size);
}
if (cur != 0xff) {
@@ -126,7 +129,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt)
}
command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 0, dp->offset, 0);
- ret = ucsi_send_command(dp->con->ucsi, command, NULL, 0);
+ dp->con->ucsi->message_in_size = 0;
+ ret = ucsi_send_command(dp->con->ucsi, command);
if (ret < 0)
goto out_unlock;
@@ -193,7 +197,8 @@ static int ucsi_displayport_configure(struct ucsi_dp *dp)
command = UCSI_CMD_SET_NEW_CAM(dp->con->num, 1, dp->offset, pins);
- return ucsi_send_command(dp->con->ucsi, command, NULL, 0);
+ dp->con->ucsi->message_in_size = 0;
+ return ucsi_send_command(dp->con->ucsi, command);
}
static int ucsi_displayport_vdm(struct typec_altmode *alt,
diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
index 62a9d68bb66d..2b0225821502 100644
--- a/drivers/usb/typec/ucsi/psy.c
+++ b/drivers/usb/typec/ucsi/psy.c
@@ -29,6 +29,7 @@ static enum power_supply_property ucsi_psy_props[] = {
POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_STATUS,
};
static int ucsi_psy_get_scope(struct ucsi_connector *con,
@@ -51,6 +52,29 @@ static int ucsi_psy_get_scope(struct ucsi_connector *con,
return 0;
}
+static int ucsi_psy_get_status(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ bool is_sink = UCSI_CONSTAT(con, PWR_DIR) == TYPEC_SINK;
+ bool sink_path_enabled = true;
+
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ if (con->ucsi->version >= UCSI_VERSION_2_0)
+ sink_path_enabled =
+ UCSI_CONSTAT(con, SINK_PATH_STATUS_V2_0) ==
+ UCSI_CONSTAT_SINK_PATH_ENABLED;
+
+ if (UCSI_CONSTAT(con, CONNECTED)) {
+ if (is_sink && sink_path_enabled)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (!is_sink)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+
+ return 0;
+}
+
static int ucsi_psy_get_online(struct ucsi_connector *con,
union power_supply_propval *val)
{
@@ -245,6 +269,8 @@ static int ucsi_psy_get_prop(struct power_supply *psy,
return ucsi_psy_get_current_now(con, val);
case POWER_SUPPLY_PROP_SCOPE:
return ucsi_psy_get_scope(con, val);
+ case POWER_SUPPLY_PROP_STATUS:
+ return ucsi_psy_get_status(con, val);
default:
return -EINVAL;
}
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 3f568f790f39..9b3df776137a 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -55,8 +55,7 @@ void ucsi_notify_common(struct ucsi *ucsi, u32 cci)
}
EXPORT_SYMBOL_GPL(ucsi_notify_common);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
- void *data, size_t size)
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci)
{
bool ack = UCSI_COMMAND(command) == UCSI_ACK_CC_CI;
int ret;
@@ -68,6 +67,20 @@ int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
reinit_completion(&ucsi->complete);
+ if (ucsi->message_out_size > 0) {
+ if (!ucsi->ops->write_message_out) {
+ ucsi->message_out_size = 0;
+ ret = -EOPNOTSUPP;
+ goto out_clear_bit;
+ }
+
+ ret = ucsi->ops->write_message_out(ucsi, ucsi->message_out,
+ ucsi->message_out_size);
+ ucsi->message_out_size = 0;
+ if (ret)
+ goto out_clear_bit;
+ }
+
ret = ucsi->ops->async_control(ucsi, command);
if (ret)
goto out_clear_bit;
@@ -84,9 +97,10 @@ out_clear_bit:
if (!ret && cci)
ret = ucsi->ops->read_cci(ucsi, cci);
- if (!ret && data &&
+ if (!ret && ucsi->message_in_size > 0 &&
(*cci & UCSI_CCI_COMMAND_COMPLETE))
- ret = ucsi->ops->read_message_in(ucsi, data, size);
+ ret = ucsi->ops->read_message_in(ucsi, ucsi->message_in,
+ ucsi->message_in_size);
return ret;
}
@@ -103,23 +117,25 @@ static int ucsi_acknowledge(struct ucsi *ucsi, bool conn_ack)
ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
}
- return ucsi->ops->sync_control(ucsi, ctrl, NULL, NULL, 0);
+ ucsi->message_in_size = 0;
+ return ucsi->ops->sync_control(ucsi, ctrl, NULL);
}
-static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
- void *data, size_t size, bool conn_ack)
+static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci, bool conn_ack)
{
int ret, err;
*cci = 0;
- if (size > UCSI_MAX_DATA_LENGTH(ucsi))
+ if (ucsi->message_in_size > UCSI_MAX_DATA_LENGTH(ucsi))
return -EINVAL;
- ret = ucsi->ops->sync_control(ucsi, command, cci, data, size);
+ ret = ucsi->ops->sync_control(ucsi, command, cci);
- if (*cci & UCSI_CCI_BUSY)
- return ucsi_run_command(ucsi, UCSI_CANCEL, cci, NULL, 0, false) ?: -EBUSY;
+ if (*cci & UCSI_CCI_BUSY) {
+ ucsi->message_in_size = 0;
+ return ucsi_run_command(ucsi, UCSI_CANCEL, cci, false) ?: -EBUSY;
+ }
if (ret)
return ret;
@@ -151,10 +167,13 @@ static int ucsi_read_error(struct ucsi *ucsi, u8 connector_num)
int ret;
command = UCSI_GET_ERROR_STATUS | UCSI_CONNECTOR_NUMBER(connector_num);
- ret = ucsi_run_command(ucsi, command, &cci, &error, sizeof(error), false);
+ ucsi->message_in_size = sizeof(error);
+ ret = ucsi_run_command(ucsi, command, &cci, false);
if (ret < 0)
return ret;
+ memcpy(&error, ucsi->message_in, sizeof(error));
+
switch (error) {
case UCSI_ERROR_INCOMPATIBLE_PARTNER:
return -EOPNOTSUPP;
@@ -200,8 +219,7 @@ static int ucsi_read_error(struct ucsi *ucsi, u8 connector_num)
return -EIO;
}
-static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd,
- void *data, size_t size, bool conn_ack)
+static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd, bool conn_ack)
{
u8 connector_num;
u32 cci;
@@ -229,7 +247,7 @@ static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd,
mutex_lock(&ucsi->ppm_lock);
- ret = ucsi_run_command(ucsi, cmd, &cci, data, size, conn_ack);
+ ret = ucsi_run_command(ucsi, cmd, &cci, conn_ack);
if (cci & UCSI_CCI_ERROR)
ret = ucsi_read_error(ucsi, connector_num);
@@ -238,10 +256,9 @@ static int ucsi_send_command_common(struct ucsi *ucsi, u64 cmd,
return ret;
}
-int ucsi_send_command(struct ucsi *ucsi, u64 command,
- void *data, size_t size)
+int ucsi_send_command(struct ucsi *ucsi, u64 command)
{
- return ucsi_send_command_common(ucsi, command, data, size, false);
+ return ucsi_send_command_common(ucsi, command, false);
}
EXPORT_SYMBOL_GPL(ucsi_send_command);
@@ -319,7 +336,8 @@ void ucsi_altmode_update_active(struct ucsi_connector *con)
int i;
command = UCSI_GET_CURRENT_CAM | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_send_command(con->ucsi, command, &cur, sizeof(cur));
+ con->ucsi->message_in_size = sizeof(cur);
+ ret = ucsi_send_command(con->ucsi, command);
if (ret < 0) {
if (con->ucsi->version > 0x0100) {
dev_err(con->ucsi->dev,
@@ -327,6 +345,8 @@ void ucsi_altmode_update_active(struct ucsi_connector *con)
return;
}
cur = 0xff;
+ } else {
+ memcpy(&cur, con->ucsi->message_in, sizeof(cur));
}
if (cur < UCSI_MAX_ALTMODES)
@@ -510,7 +530,8 @@ ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_ALTMODE_OFFSET(i);
- len = ucsi_send_command(con->ucsi, command, &alt, sizeof(alt));
+ ucsi->message_in_size = sizeof(alt);
+ len = ucsi_send_command(con->ucsi, command);
/*
* We are collecting all altmodes first and then registering.
* Some type-C device will return zero length data beyond last
@@ -519,6 +540,8 @@ ucsi_register_altmodes_nvidia(struct ucsi_connector *con, u8 recipient)
if (len < 0)
return len;
+ memcpy(&alt, ucsi->message_in, sizeof(alt));
+
/* We got all altmodes, now break out and register them */
if (!len || !alt.svid)
break;
@@ -586,12 +609,15 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
command |= UCSI_GET_ALTMODE_RECIPIENT(recipient);
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_ALTMODE_OFFSET(i);
- len = ucsi_send_command(con->ucsi, command, alt, sizeof(alt));
+ con->ucsi->message_in_size = sizeof(alt);
+ len = ucsi_send_command(con->ucsi, command);
if (len == -EBUSY)
continue;
if (len <= 0)
return len;
+ memcpy(&alt, con->ucsi->message_in, sizeof(alt));
+
/*
* This code is requesting one alt mode at a time, but some PPMs
* may still return two. If that happens both alt modes need be
@@ -659,7 +685,9 @@ static int ucsi_get_connector_status(struct ucsi_connector *con, bool conn_ack)
UCSI_MAX_DATA_LENGTH(con->ucsi));
int ret;
- ret = ucsi_send_command_common(con->ucsi, command, &con->status, size, conn_ack);
+ con->ucsi->message_in_size = size;
+ ret = ucsi_send_command_common(con->ucsi, command, conn_ack);
+ memcpy(&con->status, con->ucsi->message_in, size);
return ret < 0 ? ret : 0;
}
@@ -682,8 +710,9 @@ static int ucsi_read_pdos(struct ucsi_connector *con,
command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
command |= is_source(role) ? UCSI_GET_PDOS_SRC_PDOS : 0;
- ret = ucsi_send_command(ucsi, command, pdos + offset,
- num_pdos * sizeof(u32));
+ ucsi->message_in_size = num_pdos * sizeof(u32);
+ ret = ucsi_send_command(ucsi, command);
+ memcpy(pdos + offset, ucsi->message_in, num_pdos * sizeof(u32));
if (ret < 0 && ret != -ETIMEDOUT)
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
@@ -770,7 +799,9 @@ static int ucsi_get_pd_message(struct ucsi_connector *con, u8 recipient,
command |= UCSI_GET_PD_MESSAGE_BYTES(len);
command |= UCSI_GET_PD_MESSAGE_TYPE(type);
- ret = ucsi_send_command(con->ucsi, command, data + offset, len);
+ con->ucsi->message_in_size = len;
+ ret = ucsi_send_command(con->ucsi, command);
+ memcpy(data + offset, con->ucsi->message_in, len);
if (ret < 0)
return ret;
}
@@ -935,7 +966,9 @@ static int ucsi_register_cable(struct ucsi_connector *con)
int ret;
command = UCSI_GET_CABLE_PROPERTY | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_send_command(con->ucsi, command, &cable_prop, sizeof(cable_prop));
+ con->ucsi->message_in_size = sizeof(cable_prop);
+ ret = ucsi_send_command(con->ucsi, command);
+ memcpy(&cable_prop, con->ucsi->message_in, sizeof(cable_prop));
if (ret < 0) {
dev_err(con->ucsi->dev, "GET_CABLE_PROPERTY failed (%d)\n", ret);
return ret;
@@ -996,7 +1029,9 @@ static int ucsi_check_connector_capability(struct ucsi_connector *con)
return 0;
command = UCSI_GET_CONNECTOR_CAPABILITY | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_send_command(con->ucsi, command, &con->cap, sizeof(con->cap));
+ con->ucsi->message_in_size = sizeof(con->cap);
+ ret = ucsi_send_command(con->ucsi, command);
+ memcpy(&con->cap, con->ucsi->message_in, sizeof(con->cap));
if (ret < 0) {
dev_err(con->ucsi->dev, "GET_CONNECTOR_CAPABILITY failed (%d)\n", ret);
return ret;
@@ -1008,6 +1043,28 @@ static int ucsi_check_connector_capability(struct ucsi_connector *con)
return ret;
}
+static void ucsi_orientation(struct ucsi_connector *con)
+{
+ if (con->ucsi->version < UCSI_VERSION_2_0)
+ return;
+
+ if (!UCSI_CONSTAT(con, CONNECTED)) {
+ typec_set_orientation(con->port, TYPEC_ORIENTATION_NONE);
+ return;
+ }
+
+ switch (UCSI_CONSTAT(con, ORIENTATION)) {
+ case UCSI_CONSTAT_ORIENTATION_NORMAL:
+ typec_set_orientation(con->port, TYPEC_ORIENTATION_NORMAL);
+ break;
+ case UCSI_CONSTAT_ORIENTATION_REVERSE:
+ typec_set_orientation(con->port, TYPEC_ORIENTATION_REVERSE);
+ break;
+ default:
+ break;
+ }
+}
+
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
switch (UCSI_CONSTAT(con, PWR_OPMODE)) {
@@ -1022,14 +1079,17 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
con->rdo = 0;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_1_5A);
+ ucsi_port_psy_changed(con);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
con->rdo = 0;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_3_0A);
+ ucsi_port_psy_changed(con);
break;
default:
con->rdo = 0;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_USB);
+ ucsi_port_psy_changed(con);
break;
}
}
@@ -1258,6 +1318,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
typec_set_pwr_role(con->port, role);
ucsi_port_psy_changed(con);
ucsi_partner_change(con);
+ ucsi_orientation(con);
if (UCSI_CONSTAT(con, CONNECTED)) {
ucsi_register_partner(con);
@@ -1290,7 +1351,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (change & UCSI_CONSTAT_CAM_CHANGE)
ucsi_partner_task(con, ucsi_check_altmodes, 1, HZ);
- if (change & UCSI_CONSTAT_BC_CHANGE)
+ if (change & (UCSI_CONSTAT_BC_CHANGE | UCSI_CONSTAT_SINK_PATH_CHANGE))
ucsi_port_psy_changed(con);
if (con->ucsi->version >= UCSI_VERSION_2_1 &&
@@ -1354,7 +1415,8 @@ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
else if (con->ucsi->version >= UCSI_VERSION_2_0)
command |= hard ? 0 : UCSI_CONNECTOR_RESET_DATA_VER_2_0;
- return ucsi_send_command(con->ucsi, command, NULL, 0);
+ con->ucsi->message_in_size = 0;
+ return ucsi_send_command(con->ucsi, command);
}
static int ucsi_reset_ppm(struct ucsi *ucsi)
@@ -1435,7 +1497,8 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
{
int ret;
- ret = ucsi_send_command(con->ucsi, command, NULL, 0);
+ con->ucsi->message_in_size = 0;
+ ret = ucsi_send_command(con->ucsi, command);
if (ret == -ETIMEDOUT) {
u64 c;
@@ -1443,7 +1506,8 @@ static int ucsi_role_cmd(struct ucsi_connector *con, u64 command)
ucsi_reset_ppm(con->ucsi);
c = UCSI_SET_NOTIFICATION_ENABLE | con->ucsi->ntfy;
- ucsi_send_command(con->ucsi, c, NULL, 0);
+ con->ucsi->message_in_size = 0;
+ ucsi_send_command(con->ucsi, c);
ucsi_reset_connector(con, true);
}
@@ -1596,10 +1660,13 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
/* Get connector capability */
command = UCSI_GET_CONNECTOR_CAPABILITY;
command |= UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
+ ucsi->message_in_size = sizeof(con->cap);
+ ret = ucsi_send_command(ucsi, command);
if (ret < 0)
goto out_unlock;
+ memcpy(&con->cap, ucsi->message_in, sizeof(con->cap));
+
if (UCSI_CONCAP(con, OPMODE_DRP))
cap->data = TYPEC_PORT_DRD;
else if (UCSI_CONCAP(con, OPMODE_DFP))
@@ -1634,6 +1701,9 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
cap->driver_data = con;
cap->ops = &ucsi_ops;
+ if (ucsi->version >= UCSI_VERSION_2_0)
+ con->typec_cap.orientation_aware = true;
+
if (ucsi->ops->update_connector)
ucsi->ops->update_connector(con);
@@ -1690,6 +1760,7 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
typec_set_pwr_role(con->port, UCSI_CONSTAT(con, PWR_DIR));
ucsi_register_partner(con);
ucsi_pwr_opmode_change(con);
+ ucsi_orientation(con);
ucsi_port_psy_changed(con);
if (con->ucsi->cap.features & UCSI_CAP_GET_PD_MESSAGE)
ucsi_get_partner_identity(con);
@@ -1792,21 +1863,30 @@ static int ucsi_init(struct ucsi *ucsi)
/* Enable basic notifications */
ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
- ret = ucsi_send_command(ucsi, command, NULL, 0);
+ ucsi->message_in_size = 0;
+ ret = ucsi_send_command(ucsi, command);
if (ret < 0)
goto err_reset;
/* Get PPM capabilities */
command = UCSI_GET_CAPABILITY;
- ret = ucsi_send_command(ucsi, command, &ucsi->cap,
- BITS_TO_BYTES(UCSI_GET_CAPABILITY_SIZE));
+ ucsi->message_in_size = BITS_TO_BYTES(UCSI_GET_CAPABILITY_SIZE);
+ ret = ucsi_send_command(ucsi, command);
if (ret < 0)
goto err_reset;
+ memcpy(&ucsi->cap, ucsi->message_in, BITS_TO_BYTES(UCSI_GET_CAPABILITY_SIZE));
+
if (!ucsi->cap.num_connectors) {
ret = -ENODEV;
goto err_reset;
}
+ /* Check if the reserved bit is set. This is out of spec but happens with buggy FW */
+ if (ucsi->cap.num_connectors & 0x80) {
+ dev_warn(ucsi->dev, "UCSI: Invalid num_connectors %d. Likely buggy FW\n",
+ ucsi->cap.num_connectors);
+ ucsi->cap.num_connectors &= 0x7f; /* clear the bit and carry on */
+ }
/* Allocate the connectors. Released in ucsi_unregister() */
connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector), GFP_KERNEL);
@@ -1826,7 +1906,8 @@ static int ucsi_init(struct ucsi *ucsi)
/* Enable all supported notifications */
ntfy = ucsi_get_supported_notifications(ucsi);
command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
- ret = ucsi_send_command(ucsi, command, NULL, 0);
+ ucsi->message_in_size = 0;
+ ret = ucsi_send_command(ucsi, command);
if (ret < 0)
goto err_unregister;
@@ -1877,7 +1958,8 @@ static void ucsi_resume_work(struct work_struct *work)
/* Restore UCSI notification enable mask after system resume */
command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
- ret = ucsi_send_command(ucsi, command, NULL, 0);
+ ucsi->message_in_size = 0;
+ ret = ucsi_send_command(ucsi, command);
if (ret < 0) {
dev_err(ucsi->dev, "failed to re-enable notifications (%d)\n", ret);
return;
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index e301d9012936..f946b728c373 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -29,6 +29,10 @@ struct dentry;
#define UCSI_MESSAGE_OUT 32
#define UCSIv2_MESSAGE_OUT 272
+/* Define maximum lengths for message buffers */
+#define UCSI_MAX_MESSAGE_IN_LENGTH 256
+#define UCSI_MAX_MESSAGE_OUT_LENGTH 256
+
/* UCSI versions */
#define UCSI_VERSION_1_0 0x0100
#define UCSI_VERSION_1_1 0x0110
@@ -65,6 +69,7 @@ struct dentry;
* @read_cci: Read CCI register
* @poll_cci: Read CCI register while polling with notifications disabled
* @read_message_in: Read message data from UCSI
+ * @write_message_out: Write message data to UCSI
* @sync_control: Blocking control operation
* @async_control: Non-blocking control operation
* @update_altmodes: Squashes duplicate DP altmodes
@@ -80,8 +85,8 @@ struct ucsi_operations {
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
- int (*sync_control)(struct ucsi *ucsi, u64 command, u32 *cci,
- void *data, size_t size);
+ int (*write_message_out)(struct ucsi *ucsi, void *data, size_t data_len);
+ int (*sync_control)(struct ucsi *ucsi, u64 command, u32 *cci);
int (*async_control)(struct ucsi *ucsi, u64 command);
bool (*update_altmodes)(struct ucsi *ucsi, u8 recipient,
struct ucsi_altmode *orig,
@@ -127,10 +132,12 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_CONNECTOR_STATUS 0x12
#define UCSI_GET_CONNECTOR_STATUS_SIZE 152
#define UCSI_GET_ERROR_STATUS 0x13
+#define UCSI_SET_POWER_LEVEL 0x14
#define UCSI_GET_ATTENTION_VDO 0x16
#define UCSI_GET_PD_MESSAGE 0x15
#define UCSI_GET_CAM_CS 0x18
#define UCSI_SET_SINK_PATH 0x1c
+#define UCSI_SET_PDOS 0x1d
#define UCSI_READ_POWER_LEVEL 0x1e
#define UCSI_SET_USB 0x21
#define UCSI_GET_LPM_PPM_INFO 0x22
@@ -360,6 +367,12 @@ struct ucsi_cable_property {
#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
#define UCSI_CONSTAT_PD_VERSION_V1_2 UCSI_DECLARE_BITFIELD_V1_2(70, 16)
+#define UCSI_CONSTAT_ORIENTATION UCSI_DECLARE_BITFIELD_V2_0(86, 1)
+#define UCSI_CONSTAT_ORIENTATION_NORMAL 0
+#define UCSI_CONSTAT_ORIENTATION_REVERSE 1
+#define UCSI_CONSTAT_SINK_PATH_STATUS_V2_0 UCSI_DECLARE_BITFIELD_V2_0(87, 1)
+#define UCSI_CONSTAT_SINK_PATH_DISABLED 0
+#define UCSI_CONSTAT_SINK_PATH_ENABLED 1
#define UCSI_CONSTAT_PWR_READING_READY_V2_1 UCSI_DECLARE_BITFIELD_V2_1(89, 1)
#define UCSI_CONSTAT_CURRENT_SCALE_V2_1 UCSI_DECLARE_BITFIELD_V2_1(90, 3)
#define UCSI_CONSTAT_PEAK_CURRENT_V2_1 UCSI_DECLARE_BITFIELD_V2_1(93, 16)
@@ -379,6 +392,7 @@ struct ucsi_cable_property {
#define UCSI_CONSTAT_BC_CHANGE BIT(9)
#define UCSI_CONSTAT_PARTNER_CHANGE BIT(11)
#define UCSI_CONSTAT_POWER_DIR_CHANGE BIT(12)
+#define UCSI_CONSTAT_SINK_PATH_CHANGE BIT(13)
#define UCSI_CONSTAT_CONNECT_CHANGE BIT(14)
#define UCSI_CONSTAT_ERROR BIT(15)
@@ -485,6 +499,12 @@ struct ucsi {
unsigned long quirks;
#define UCSI_NO_PARTNER_PDOS BIT(0) /* Don't read partner's PDOs */
#define UCSI_DELAY_DEVICE_PDOS BIT(1) /* Reading PDOs fails until the parter is in PD mode */
+
+ /* Fixed-size buffers for incoming and outgoing messages */
+ u8 message_in[UCSI_MAX_MESSAGE_IN_LENGTH];
+ size_t message_in_size;
+ u8 message_out[UCSI_MAX_MESSAGE_OUT_LENGTH];
+ size_t message_out_size;
};
#define UCSI_MAX_DATA_LENGTH(u) (((u)->version < UCSI_VERSION_2_0) ? 0x10 : 0xff)
@@ -547,15 +567,13 @@ struct ucsi_connector {
struct usb_pd_identity cable_identity;
};
-int ucsi_send_command(struct ucsi *ucsi, u64 command,
- void *retval, size_t size);
+int ucsi_send_command(struct ucsi *ucsi, u64 command);
void ucsi_altmode_update_active(struct ucsi_connector *con);
int ucsi_resume(struct ucsi *ucsi);
void ucsi_notify_common(struct ucsi *ucsi, u32 cci);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
- void *data, size_t size);
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
int ucsi_register_port_psy(struct ucsi_connector *con);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 6b92f296e985..f9beeb835238 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -86,6 +86,21 @@ static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_le
return 0;
}
+static int ucsi_acpi_write_message_out(struct ucsi *ucsi, void *data, size_t data_len)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+
+ if (!data || !data_len)
+ return -EINVAL;
+
+ if (ucsi->version <= UCSI_VERSION_1_2)
+ memcpy(ua->base + UCSI_MESSAGE_OUT, data, data_len);
+ else
+ memcpy(ua->base + UCSIv2_MESSAGE_OUT, data, data_len);
+
+ return 0;
+}
+
static int ucsi_acpi_async_control(struct ucsi *ucsi, u64 command)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
@@ -101,19 +116,19 @@ static const struct ucsi_operations ucsi_acpi_ops = {
.read_cci = ucsi_acpi_read_cci,
.poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_acpi_read_message_in,
+ .write_message_out = ucsi_acpi_write_message_out,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_acpi_async_control
};
-static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
- void *val, size_t len)
+static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command, u32 *cci)
{
u16 bogus_change = UCSI_CONSTAT_POWER_LEVEL_CHANGE |
UCSI_CONSTAT_PDOS_CHANGE;
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_sync_control_common(ucsi, command, cci, val, len);
+ ret = ucsi_sync_control_common(ucsi, command, cci);
if (ret < 0)
return ret;
@@ -125,8 +140,8 @@ static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
if (UCSI_COMMAND(ua->cmd) == UCSI_GET_CONNECTOR_STATUS &&
ua->check_bogus_event) {
/* Clear the bogus change */
- if (*(u16 *)val == bogus_change)
- *(u16 *)val = 0;
+ if (*(u16 *)ucsi->message_in == bogus_change)
+ *(u16 *)ucsi->message_in = 0;
ua->check_bogus_event = false;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index d83a0051c737..ead1b2a25c79 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -606,8 +606,7 @@ static int ucsi_ccg_async_control(struct ucsi *ucsi, u64 command)
return ccg_write(uc, reg, (u8 *)&command, sizeof(command));
}
-static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
- void *data, size_t size)
+static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
struct ucsi_connector *con;
@@ -629,16 +628,16 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
}
- ret = ucsi_sync_control_common(ucsi, command, cci, data, size);
+ ret = ucsi_sync_control_common(ucsi, command, cci);
switch (UCSI_COMMAND(command)) {
case UCSI_GET_CURRENT_CAM:
if (uc->has_multiple_dp)
- ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)data);
+ ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)ucsi->message_in);
break;
case UCSI_GET_ALTERNATE_MODES:
if (UCSI_ALTMODE_RECIPIENT(command) == UCSI_RECIPIENT_SOP) {
- struct ucsi_altmode *alt = data;
+ struct ucsi_altmode *alt = (struct ucsi_altmode *)ucsi->message_in;
if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
ucsi_ccg_nvidia_altmode(uc, alt, command);
@@ -646,7 +645,7 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
break;
case UCSI_GET_CAPABILITY:
if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
- struct ucsi_capability *cap = data;
+ struct ucsi_capability *cap = (struct ucsi_capability *)ucsi->message_in;
cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 8af79101a2fc..11b3e24e34e2 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -16,10 +16,10 @@
#define PMIC_GLINK_MAX_PORTS 3
-#define UCSI_BUF_SIZE 48
+#define UCSI_BUF_V1_SIZE (UCSI_MESSAGE_OUT + (UCSI_MESSAGE_OUT - UCSI_MESSAGE_IN))
+#define UCSI_BUF_V2_SIZE (UCSIv2_MESSAGE_OUT + (UCSIv2_MESSAGE_OUT - UCSI_MESSAGE_IN))
#define MSG_TYPE_REQ_RESP 1
-#define UCSI_BUF_SIZE 48
#define UC_NOTIFY_RECEIVER_UCSI 0x0
#define UC_UCSI_READ_BUF_REQ 0x11
@@ -30,24 +30,30 @@ struct ucsi_read_buf_req_msg {
struct pmic_glink_hdr hdr;
};
-struct ucsi_read_buf_resp_msg {
+struct __packed ucsi_read_buf_resp_msg {
struct pmic_glink_hdr hdr;
- u8 buf[UCSI_BUF_SIZE];
+ union {
+ u8 v2_buf[UCSI_BUF_V2_SIZE];
+ u8 v1_buf[UCSI_BUF_V1_SIZE];
+ } buf;
u32 ret_code;
};
-struct ucsi_write_buf_req_msg {
+struct __packed ucsi_write_buf_req_msg {
struct pmic_glink_hdr hdr;
- u8 buf[UCSI_BUF_SIZE];
+ union {
+ u8 v2_buf[UCSI_BUF_V2_SIZE];
+ u8 v1_buf[UCSI_BUF_V1_SIZE];
+ } buf;
u32 reserved;
};
-struct ucsi_write_buf_resp_msg {
+struct __packed ucsi_write_buf_resp_msg {
struct pmic_glink_hdr hdr;
u32 ret_code;
};
-struct ucsi_notify_ind_msg {
+struct __packed ucsi_notify_ind_msg {
struct pmic_glink_hdr hdr;
u32 notification;
u32 receiver;
@@ -72,7 +78,7 @@ struct pmic_glink_ucsi {
bool ucsi_registered;
bool pd_running;
- u8 read_buf[UCSI_BUF_SIZE];
+ u8 read_buf[UCSI_BUF_V2_SIZE];
};
static int pmic_glink_ucsi_read(struct ucsi *__ucsi, unsigned int offset,
@@ -132,17 +138,35 @@ static int pmic_glink_ucsi_locked_write(struct pmic_glink_ucsi *ucsi, unsigned i
const void *val, size_t val_len)
{
struct ucsi_write_buf_req_msg req = {};
+ size_t req_len, buf_len;
unsigned long left;
int ret;
+ u8 *buf;
req.hdr.owner = PMIC_GLINK_OWNER_USBC;
req.hdr.type = MSG_TYPE_REQ_RESP;
req.hdr.opcode = UC_UCSI_WRITE_BUF_REQ;
- memcpy(&req.buf[offset], val, val_len);
+
+ if (ucsi->ucsi->version >= UCSI_VERSION_2_0) {
+ buf_len = UCSI_BUF_V2_SIZE;
+ buf = req.buf.v2_buf;
+ } else if (ucsi->ucsi->version) {
+ buf_len = UCSI_BUF_V1_SIZE;
+ buf = req.buf.v1_buf;
+ } else {
+ dev_err(ucsi->dev, "UCSI version unknown\n");
+ return -EINVAL;
+ }
+ req_len = sizeof(struct pmic_glink_hdr) + buf_len + sizeof(u32);
+
+ if (offset + val_len > buf_len)
+ return -EINVAL;
+
+ memcpy(&buf[offset], val, val_len);
reinit_completion(&ucsi->write_ack);
- ret = pmic_glink_send(ucsi->client, &req, sizeof(req));
+ ret = pmic_glink_send(ucsi->client, &req, req_len);
if (ret < 0) {
dev_err(ucsi->dev, "failed to send UCSI write request: %d\n", ret);
return ret;
@@ -216,12 +240,48 @@ static const struct ucsi_operations pmic_glink_ucsi_ops = {
static void pmic_glink_ucsi_read_ack(struct pmic_glink_ucsi *ucsi, const void *data, int len)
{
- const struct ucsi_read_buf_resp_msg *resp = data;
+ u32 ret_code, resp_len, buf_len = 0;
+ u8 *buf;
+
+ if (ucsi->ucsi->version) {
+ if (ucsi->ucsi->version >= UCSI_VERSION_2_0) {
+ buf = ((struct ucsi_read_buf_resp_msg *)data)->buf.v2_buf;
+ buf_len = UCSI_BUF_V2_SIZE;
+ } else {
+ buf = ((struct ucsi_read_buf_resp_msg *)data)->buf.v1_buf;
+ buf_len = UCSI_BUF_V1_SIZE;
+ }
+ } else if (!ucsi->ucsi_registered) {
+ /*
+ * If UCSI version is not known yet because device is not registered, choose buffer
+ * size which best fits incoming data
+ */
+ if (len > sizeof(struct pmic_glink_hdr) + UCSI_BUF_V2_SIZE) {
+ buf = ((struct ucsi_read_buf_resp_msg *)data)->buf.v2_buf;
+ buf_len = UCSI_BUF_V2_SIZE;
+ } else {
+ buf = ((struct ucsi_read_buf_resp_msg *)data)->buf.v1_buf;
+ buf_len = UCSI_BUF_V1_SIZE;
+ }
+ } else {
+ dev_err(ucsi->dev, "Device has been registered but UCSI version is still unknown\n");
+ return;
+ }
- if (resp->ret_code)
+ resp_len = sizeof(struct pmic_glink_hdr) + buf_len + sizeof(u32);
+
+ if (len > resp_len)
+ return;
+
+ /* Ensure that buf_len leaves space for ret_code to be read back from memory */
+ if (buf_len > len - sizeof(struct pmic_glink_hdr) - sizeof(u32))
+ buf_len = len - sizeof(struct pmic_glink_hdr) - sizeof(u32);
+
+ memcpy(&ret_code, buf + buf_len, sizeof(u32));
+ if (ret_code)
return;
- memcpy(ucsi->read_buf, resp->buf, UCSI_BUF_SIZE);
+ memcpy(ucsi->read_buf, buf, buf_len);
complete(&ucsi->read_ack);
}
diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
index 0187c1c4b21a..299081444caa 100644
--- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
@@ -88,8 +88,7 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command)
static int yoga_c630_ucsi_sync_control(struct ucsi *ucsi,
u64 command,
- u32 *cci,
- void *data, size_t size)
+ u32 *cci)
{
int ret;
@@ -107,8 +106,8 @@ static int yoga_c630_ucsi_sync_control(struct ucsi *ucsi,
};
dev_dbg(ucsi->dev, "faking DP altmode for con1\n");
- memset(data, 0, size);
- memcpy(data, &alt, min(sizeof(alt), size));
+ memset(ucsi->message_in, 0, ucsi->message_in_size);
+ memcpy(ucsi->message_in, &alt, min(sizeof(alt), ucsi->message_in_size));
*cci = UCSI_CCI_COMMAND_COMPLETE | UCSI_SET_CCI_LENGTH(sizeof(alt));
return 0;
}
@@ -121,18 +120,18 @@ static int yoga_c630_ucsi_sync_control(struct ucsi *ucsi,
if (UCSI_COMMAND(command) == UCSI_GET_ALTERNATE_MODES &&
UCSI_GET_ALTMODE_GET_CONNECTOR_NUMBER(command) == 2) {
dev_dbg(ucsi->dev, "ignoring altmodes for con2\n");
- memset(data, 0, size);
+ memset(ucsi->message_in, 0, ucsi->message_in_size);
*cci = UCSI_CCI_COMMAND_COMPLETE;
return 0;
}
- ret = ucsi_sync_control_common(ucsi, command, cci, data, size);
+ ret = ucsi_sync_control_common(ucsi, command, cci);
if (ret < 0)
return ret;
/* UCSI_GET_CURRENT_CAM is off-by-one on all ports */
- if (UCSI_COMMAND(command) == UCSI_GET_CURRENT_CAM && data)
- ((u8 *)data)[0]--;
+ if (UCSI_COMMAND(command) == UCSI_GET_CURRENT_CAM && ucsi->message_in_size > 0)
+ ucsi->message_in[0]--;
return ret;
}
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index 7eb2e074012a..55919c3762ba 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -4,6 +4,7 @@
*/
#include <linux/kthread.h>
+#include <linux/minmax.h>
#include <linux/socket.h>
#include <linux/scatterlist.h>
@@ -239,17 +240,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
urb->actual_length > 0) {
if (urb->num_sgs) {
unsigned int copy = urb->actual_length;
- int size;
+ unsigned int size;
for_each_sg(urb->sg, sg, urb->num_sgs, i) {
if (copy == 0)
break;
- if (copy < sg->length)
- size = copy;
- else
- size = sg->length;
-
+ size = min(copy, sg->length);
iov[iovnum].iov_base = sg_virt(sg);
iov[iovnum].iov_len = size;
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 0d6c10a8490c..e55690da19e5 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -346,9 +346,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (wIndex < 1 || wIndex > VHCI_HC_PORTS) {
invalid_rhport = true;
if (wIndex > VHCI_HC_PORTS)
- pr_err("invalid port number %d\n", wIndex);
- } else
+ dev_err(hcd_dev(hcd), "invalid port number %d\n", wIndex);
+ } else {
rhport = wIndex - 1;
+ }
vhci_hcd = hcd_to_vhci_hcd(hcd);
vhci = vhci_hcd->vhci;
@@ -368,14 +369,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case ClearPortFeature:
if (invalid_rhport) {
- pr_err("invalid port number %d\n", wIndex);
+ dev_err(hcd_dev(hcd), "invalid port number %d\n", wIndex);
goto error;
}
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (hcd->speed >= HCD_USB3) {
- pr_err(" ClearPortFeature: USB_PORT_FEAT_SUSPEND req not "
- "supported for USB 3.0 roothub\n");
+ dev_err(hcd_dev(hcd),
+ "ClearPortFeature: USB_PORT_FEAT_SUSPEND req not supported for USB 3.0 roothub\n");
goto error;
}
usbip_dbg_vhci_rh(
@@ -408,7 +409,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (hcd->speed >= HCD_USB3 &&
(wLength < USB_DT_SS_HUB_SIZE ||
wValue != (USB_DT_SS_HUB << 8))) {
- pr_err("Wrong hub descriptor type for USB 3.0 roothub.\n");
+ dev_err(hcd_dev(hcd),
+ "Wrong hub descriptor type for USB 3.0 roothub.\n");
goto error;
}
if (hcd->speed >= HCD_USB3)
@@ -433,7 +435,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case GetPortStatus:
usbip_dbg_vhci_rh(" GetPortStatus port %x\n", wIndex);
if (invalid_rhport) {
- pr_err("invalid port number %d\n", wIndex);
+ dev_err(hcd_dev(hcd), "invalid port number %d\n", wIndex);
retval = -EPIPE;
goto error;
}
@@ -483,7 +485,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
USB_PORT_STAT_LOW_SPEED;
break;
default:
- pr_err("vhci_device speed not set\n");
+ dev_err(hcd_dev(hcd), "vhci_device speed not set\n");
break;
}
}
@@ -505,8 +507,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_LINK_STATE\n");
if (hcd->speed < HCD_USB3) {
- pr_err("USB_PORT_FEAT_LINK_STATE req not "
- "supported for USB 2.0 roothub\n");
+ dev_err(hcd_dev(hcd),
+ "USB_PORT_FEAT_LINK_STATE req not supported for USB 2.0 roothub\n");
goto error;
}
/*
@@ -523,8 +525,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
" SetPortFeature: USB_PORT_FEAT_U2_TIMEOUT\n");
/* TODO: add suspend/resume support! */
if (hcd->speed < HCD_USB3) {
- pr_err("USB_PORT_FEAT_U1/2_TIMEOUT req not "
- "supported for USB 2.0 roothub\n");
+ dev_err(hcd_dev(hcd),
+ "USB_PORT_FEAT_U1/2_TIMEOUT req not supported for USB 2.0 roothub\n");
goto error;
}
break;
@@ -533,13 +535,13 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
" SetPortFeature: USB_PORT_FEAT_SUSPEND\n");
/* Applicable only for USB2.0 hub */
if (hcd->speed >= HCD_USB3) {
- pr_err("USB_PORT_FEAT_SUSPEND req not "
- "supported for USB 3.0 roothub\n");
+ dev_err(hcd_dev(hcd),
+ "USB_PORT_FEAT_SUSPEND req not supported for USB 3.0 roothub\n");
goto error;
}
if (invalid_rhport) {
- pr_err("invalid port number %d\n", wIndex);
+ dev_err(hcd_dev(hcd), "invalid port number %d\n", wIndex);
goto error;
}
@@ -549,7 +551,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_POWER\n");
if (invalid_rhport) {
- pr_err("invalid port number %d\n", wIndex);
+ dev_err(hcd_dev(hcd), "invalid port number %d\n", wIndex);
goto error;
}
if (hcd->speed >= HCD_USB3)
@@ -561,13 +563,13 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
if (invalid_rhport) {
- pr_err("invalid port number %d\n", wIndex);
+ dev_err(hcd_dev(hcd), "invalid port number %d\n", wIndex);
goto error;
}
/* Applicable only for USB3.0 hub */
if (hcd->speed < HCD_USB3) {
- pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
- "supported for USB 2.0 roothub\n");
+ dev_err(hcd_dev(hcd),
+ "USB_PORT_FEAT_BH_PORT_RESET req not supported for USB 2.0 roothub\n");
goto error;
}
fallthrough;
@@ -575,7 +577,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_RESET\n");
if (invalid_rhport) {
- pr_err("invalid port number %d\n", wIndex);
+ dev_err(hcd_dev(hcd), "invalid port number %d\n", wIndex);
goto error;
}
/* if it's already enabled, disable */
@@ -598,7 +600,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
wValue);
if (invalid_rhport) {
- pr_err("invalid port number %d\n", wIndex);
+ dev_err(hcd_dev(hcd), "invalid port number %d\n", wIndex);
goto error;
}
if (wValue >= 32)
@@ -618,8 +620,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case GetPortErrorCount:
usbip_dbg_vhci_rh(" GetPortErrorCount\n");
if (hcd->speed < HCD_USB3) {
- pr_err("GetPortErrorCount req not "
- "supported for USB 2.0 roothub\n");
+ dev_err(hcd_dev(hcd),
+ "GetPortErrorCount req not supported for USB 2.0 roothub\n");
goto error;
}
/* We'll always return 0 since this is a dummy hub */
@@ -628,13 +630,14 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case SetHubDepth:
usbip_dbg_vhci_rh(" SetHubDepth\n");
if (hcd->speed < HCD_USB3) {
- pr_err("SetHubDepth req not supported for "
- "USB 2.0 roothub\n");
+ dev_err(hcd_dev(hcd),
+ "SetHubDepth req not supported for USB 2.0 roothub\n");
goto error;
}
break;
default:
- pr_err("default hub control req: %04x v%04x i%04x l%d\n",
+ dev_err(hcd_dev(hcd),
+ "default hub control req: %04x v%04x i%04x l%d\n",
typeReq, wValue, wIndex, wLength);
error:
/* "protocol stall" on error */
@@ -642,7 +645,7 @@ error:
}
if (usbip_dbg_flag_vhci_rh) {
- pr_debug("port %d\n", rhport);
+ dev_dbg(hcd_dev(hcd), "%s port %d\n", __func__, rhport);
/* Only dump valid port status */
if (!invalid_rhport) {
dump_port_status_diff(prev_port_status[rhport],
@@ -702,7 +705,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
unsigned long flags;
if (portnum > VHCI_HC_PORTS) {
- pr_err("invalid port number %d\n", portnum);
+ dev_err(hcd_dev(hcd), "invalid port number %d\n", portnum);
return -ENODEV;
}
vdev = &vhci_hcd->vdev[portnum-1];
@@ -831,15 +834,15 @@ out:
no_need_xmit:
usb_hcd_unlink_urb_from_ep(hcd, urb);
no_need_unlink:
- spin_unlock_irqrestore(&vhci->lock, flags);
if (!ret) {
/* usb_hcd_giveback_urb() should be called with
* irqs disabled
*/
- local_irq_disable();
+ spin_unlock(&vhci->lock);
usb_hcd_giveback_urb(hcd, urb, urb->status);
- local_irq_enable();
+ spin_lock(&vhci->lock);
}
+ spin_unlock_irqrestore(&vhci->lock, flags);
return ret;
}
@@ -958,7 +961,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unlink->seqnum = atomic_inc_return(&vhci_hcd->seqnum);
if (unlink->seqnum == 0xffff)
- pr_info("seqnum max\n");
+ dev_info(hcd_dev(hcd), "seqnum max\n");
unlink->unlink_seqnum = priv->seqnum;
@@ -1036,10 +1039,11 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
static void vhci_shutdown_connection(struct usbip_device *ud)
{
struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
+ struct usb_hcd *hcd = vhci_hcd_to_hcd(vdev_to_vhci_hcd(vdev));
/* need this? see stub_dev.c */
if (ud->tcp_socket) {
- pr_debug("shutdown tcp_socket %d\n", ud->sockfd);
+ dev_dbg(hcd_dev(hcd), "shutdown tcp_socket %d\n", ud->sockfd);
kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
}
@@ -1052,7 +1056,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
kthread_stop_put(vdev->ud.tcp_tx);
vdev->ud.tcp_tx = NULL;
}
- pr_info("stop threads\n");
+ dev_info(hcd_dev(hcd), "stop threads\n");
/* active connection is closed */
if (vdev->ud.tcp_socket) {
@@ -1060,7 +1064,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
vdev->ud.tcp_socket = NULL;
vdev->ud.sockfd = -1;
}
- pr_info("release socket\n");
+ dev_info(hcd_dev(hcd), "release socket\n");
vhci_device_unlink_cleanup(vdev);
@@ -1086,7 +1090,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
*/
rh_port_disconnect(vdev);
- pr_info("disconnect device\n");
+ dev_info(hcd_dev(hcd), "disconnect device\n");
}
static void vhci_device_reset(struct usbip_device *ud)
@@ -1222,7 +1226,7 @@ static int vhci_start(struct usb_hcd *hcd)
id = hcd_name_to_id(hcd_name(hcd));
if (id < 0) {
- pr_err("invalid vhci name %s\n", hcd_name(hcd));
+ dev_err(hcd_dev(hcd), "invalid vhci name %s\n", hcd_name(hcd));
return -EINVAL;
}
@@ -1239,7 +1243,7 @@ static int vhci_start(struct usb_hcd *hcd)
vhci_finish_attr_group();
return err;
}
- pr_info("created sysfs %s\n", hcd_name(hcd));
+ dev_info(hcd_dev(hcd), "created sysfs %s\n", hcd_name(hcd));
}
return 0;
@@ -1372,10 +1376,9 @@ static int vhci_hcd_probe(struct platform_device *pdev)
* Our private data is also allocated automatically.
*/
hcd_hs = usb_create_hcd(&vhci_hc_driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd_hs) {
- pr_err("create primary hcd failed\n");
- return -ENOMEM;
- }
+ if (!hcd_hs)
+ return dev_err_probe(&pdev->dev, -ENOMEM, "create primary hcd failed\n");
+
hcd_hs->has_tt = 1;
/*
@@ -1383,22 +1386,21 @@ static int vhci_hcd_probe(struct platform_device *pdev)
* Call the driver's reset() and start() routines.
*/
ret = usb_add_hcd(hcd_hs, 0, 0);
- if (ret != 0) {
- pr_err("usb_add_hcd hs failed %d\n", ret);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "usb_add_hcd hs failed\n");
goto put_usb2_hcd;
}
hcd_ss = usb_create_shared_hcd(&vhci_hc_driver, &pdev->dev,
dev_name(&pdev->dev), hcd_hs);
if (!hcd_ss) {
- ret = -ENOMEM;
- pr_err("create shared hcd failed\n");
+ ret = dev_err_probe(&pdev->dev, -ENOMEM, "create shared hcd failed\n");
goto remove_usb2_hcd;
}
ret = usb_add_hcd(hcd_ss, 0, 0);
if (ret) {
- pr_err("usb_add_hcd ss failed %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "usb_add_hcd ss failed\n");
goto put_usb3_hcd;
}
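
The vhci_hcd hunk above keeps vhci->lock held across the whole no_need_xmit path and only drops the plain spinlock (not the saved irq state) around usb_hcd_giveback_urb(), so the callback still runs with interrupts disabled as it requires. As a rough userspace analogy of that "drop the lock around a callback, re-take it afterwards" shape — a pthread mutex stands in for the kernel spinlock and a made-up give_back() helper for usb_hcd_giveback_urb(); this is a sketch, not the driver's code:

/* Userspace analogy only: not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Callback that must not run under 'lock', since it may re-enter
 * paths that take 'lock' themselves. */
static void give_back(int status)
{
	printf("giving back urb, status=%d\n", status);
}

static int enqueue(int need_giveback, int status)
{
	pthread_mutex_lock(&lock);
	/* ... queue bookkeeping done under the lock ... */
	if (need_giveback) {
		/* Drop only the lock for the callback, then re-take it,
		 * mirroring spin_unlock()/spin_lock() inside the outer
		 * spin_lock_irqsave()/spin_unlock_irqrestore() pair. */
		pthread_mutex_unlock(&lock);
		give_back(status);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	return enqueue(1, 0);
}
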
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index f3248a3e5402..c1acbc98465d 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -66,7 +66,6 @@ static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
struct p9_fid *fid;
struct inode *inode;
struct v9fs_inode *v9inode;
- unsigned int cached;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -76,11 +75,7 @@ static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
goto out_valid;
v9inode = V9FS_I(inode);
- struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
-
- cached = v9ses->cache & (CACHE_META | CACHE_LOOSE);
-
- if (!cached || v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
+ if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) {
int retval;
struct v9fs_session_info *v9ses;
@@ -114,6 +109,7 @@ static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
p9_debug(P9_DEBUG_VFS,
"refresh inode: dentry = %pd (%p), got error %pe\n",
dentry, dentry, ERR_PTR(retval));
+ if (retval < 0)
return retval;
}
}
@@ -150,8 +146,6 @@ const struct dentry_operations v9fs_cached_dentry_operations = {
};
const struct dentry_operations v9fs_dentry_operations = {
- .d_revalidate = v9fs_lookup_revalidate,
- .d_weak_revalidate = __v9fs_lookup_revalidate,
.d_release = v9fs_dentry_release,
.d_unalias_trylock = v9fs_dentry_unalias_trylock,
.d_unalias_unlock = v9fs_dentry_unalias_unlock,
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 69f378a83775..d0c77ec31b1d 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1339,14 +1339,8 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
* Don't update inode if the file type is different
*/
umode = p9mode2unixmode(v9ses, st, &rdev);
- if (inode_wrong_type(inode, umode)) {
- /*
- * Do this as a way of letting the caller know the inode should not
- * be reused
- */
- v9fs_invalidate_inode_attr(inode);
+ if (inode_wrong_type(inode, umode))
goto out;
- }
/*
* We don't want to refresh inode->i_size,
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 0b404e8484d2..be297e335468 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -897,14 +897,8 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
/*
* Don't update inode if the file type is different
*/
- if (inode_wrong_type(inode, st->st_mode)) {
- /*
- * Do this as a way of letting the caller know the inode should not
- * be reused
- */
- v9fs_invalidate_inode_attr(inode);
+ if (inode_wrong_type(inode, st->st_mode))
goto out;
- }
/*
* We don't want to refresh inode->i_size,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 41e37f7f67cc..3df7b9d7fbe8 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -2110,9 +2110,9 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
for (int i = 0; i < count; i++) {
__btrfs_kill_delayed_node(delayed_nodes[i]);
+ btrfs_delayed_node_ref_tracker_dir_print(delayed_nodes[i]);
btrfs_release_delayed_node(delayed_nodes[i],
&delayed_node_trackers[i]);
- btrfs_delayed_node_ref_tracker_dir_print(delayed_nodes[i]);
}
}
}
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 0d949edc0caf..b09d4ec8c77d 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -219,6 +219,13 @@ static inline void btrfs_delayed_node_ref_tracker_dir_print(struct btrfs_delayed
if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER))
return;
+ /*
+ * Only print if there are leaked references. The caller is
+ * holding one reference, so if refs == 1 there is no leak.
+ */
+ if (refcount_read(&node->refs) == 1)
+ return;
+
ref_tracker_dir_print(&node->ref_dir.dir,
BTRFS_DELAYED_NODE_REF_TRACKER_DISPLAY_LIMIT);
}
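
The early return added above treats refs == 1 as "only the caller's reference remains, nothing leaked", so the ref-tracker dump is skipped in the common case. A tiny userspace illustration of that convention, with a C11 atomic refcount and a hypothetical report_leak() helper (a sketch, not the btrfs code):

/* Userspace illustration: "refs == 1 means only our reference is left". */
#include <stdatomic.h>
#include <stdio.h>

struct node {
	atomic_int refs;
};

static void report_leak(struct node *n)
{
	printf("leak: %d extra reference(s) still held\n",
	       atomic_load(&n->refs) - 1);
}

/* Called while the caller still holds one reference of its own. */
static void maybe_report_leak(struct node *n)
{
	if (atomic_load(&n->refs) == 1)
		return;		/* only the caller's reference: no leak */
	report_leak(n);
}

int main(void)
{
	struct node clean = { .refs = 1 };
	struct node leaky = { .refs = 3 };

	maybe_report_leak(&clean);	/* prints nothing */
	maybe_report_leak(&leaky);	/* reports 2 leaked references */
	return 0;
}
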
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index de4cb0f3fbd0..e9224145d754 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -982,7 +982,7 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
extent_root = btrfs_extent_root(fs_info, 0);
/* If the extent tree is damaged we cannot ignore it (IGNOREBADROOTS). */
- if (IS_ERR(extent_root)) {
+ if (!extent_root) {
btrfs_warn(fs_info, "ref-verify: extent tree not available, disabling");
btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
return 0;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6144e66661f5..96a030d28e09 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4102,6 +4102,48 @@ out:
return ret;
}
+static int rbtree_check_dir_ref_comp(const void *k, const struct rb_node *node)
+{
+ const struct recorded_ref *data = k;
+ const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
+
+ if (data->dir > ref->dir)
+ return 1;
+ if (data->dir < ref->dir)
+ return -1;
+ if (data->dir_gen > ref->dir_gen)
+ return 1;
+ if (data->dir_gen < ref->dir_gen)
+ return -1;
+ return 0;
+}
+
+static bool rbtree_check_dir_ref_less(struct rb_node *node, const struct rb_node *parent)
+{
+ const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);
+
+ return rbtree_check_dir_ref_comp(entry, parent) < 0;
+}
+
+static int record_check_dir_ref_in_tree(struct rb_root *root,
+ struct recorded_ref *ref, struct list_head *list)
+{
+ struct recorded_ref *tmp_ref;
+ int ret;
+
+ if (rb_find(ref, root, rbtree_check_dir_ref_comp))
+ return 0;
+
+ ret = dup_ref(ref, list);
+ if (ret < 0)
+ return ret;
+
+ tmp_ref = list_last_entry(list, struct recorded_ref, list);
+ rb_add(&tmp_ref->node, root, rbtree_check_dir_ref_less);
+ tmp_ref->root = root;
+ return 0;
+}
+
static int rename_current_inode(struct send_ctx *sctx,
struct fs_path *current_path,
struct fs_path *new_path)
@@ -4129,11 +4171,11 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
struct recorded_ref *cur;
struct recorded_ref *cur2;
LIST_HEAD(check_dirs);
+ struct rb_root rbtree_check_dirs = RB_ROOT;
struct fs_path *valid_path = NULL;
u64 ow_inode = 0;
u64 ow_gen;
u64 ow_mode;
- u64 last_dir_ino_rm = 0;
bool did_overwrite = false;
bool is_orphan = false;
bool can_rename = true;
@@ -4437,7 +4479,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
goto out;
}
}
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4465,7 +4507,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
}
list_for_each_entry(cur, &sctx->deleted_refs, list) {
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4475,7 +4517,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* We have a moved dir. Add the old parent to check_dirs
*/
cur = list_first_entry(&sctx->deleted_refs, struct recorded_ref, list);
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
} else if (!S_ISDIR(sctx->cur_inode_mode)) {
@@ -4509,7 +4551,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
if (is_current_inode_path(sctx, cur->full_path))
fs_path_reset(&sctx->cur_inode_path);
}
- ret = dup_ref(cur, &check_dirs);
+ ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs);
if (ret < 0)
goto out;
}
@@ -4552,8 +4594,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
- } else if (ret == inode_state_did_delete &&
- cur->dir != last_dir_ino_rm) {
+ } else if (ret == inode_state_did_delete) {
ret = can_rmdir(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
@@ -4565,7 +4606,6 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
ret = send_rmdir(sctx, valid_path);
if (ret < 0)
goto out;
- last_dir_ino_rm = cur->dir;
}
}
}
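
The send.c hunks above key each recorded_ref by (dir, dir_gen) in an rb-tree so a directory is added to check_dirs at most once, replacing the single-entry last_dir_ino_rm shortcut. A minimal userspace sketch of the same dedup-by-key idea — using a sorted array with bsearch() rather than the kernel's rb_find()/rb_add(), and invented names — could look like this:

/* Userspace illustration of queueing each (dir, dir_gen) key only once. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dir_key {
	uint64_t dir;
	uint64_t dir_gen;
};

static int key_cmp(const void *a, const void *b)
{
	const struct dir_key *x = a, *y = b;

	if (x->dir != y->dir)
		return x->dir < y->dir ? -1 : 1;
	if (x->dir_gen != y->dir_gen)
		return x->dir_gen < y->dir_gen ? -1 : 1;
	return 0;
}

/* Queue the key only if it has not been seen before; returns 1 if queued. */
static int queue_once(struct dir_key *seen, size_t *nseen, struct dir_key key)
{
	if (bsearch(&key, seen, *nseen, sizeof(key), key_cmp))
		return 0;				/* duplicate, skip */
	seen[(*nseen)++] = key;
	qsort(seen, *nseen, sizeof(key), key_cmp);	/* keep it sorted */
	return 1;
}

int main(void)
{
	struct dir_key seen[16];
	size_t nseen = 0;
	struct dir_key refs[] = { {5, 1}, {5, 1}, {7, 2}, {5, 1} };

	for (size_t i = 0; i < sizeof(refs) / sizeof(refs[0]); i++)
		if (queue_once(seen, &nseen, refs[i]))
			printf("queued dir=%llu gen=%llu\n",
			       (unsigned long long)refs[i].dir,
			       (unsigned long long)refs[i].dir_gen);
	return 0;
}
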
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index aadc02374b2a..430e7419349c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2068,7 +2068,13 @@ static int btrfs_get_tree_subvol(struct fs_context *fc)
fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
if (!fs_info->super_copy || !fs_info->super_for_commit) {
- btrfs_free_fs_info(fs_info);
+ /*
+			 * Don't call btrfs_free_fs_info() to free it, as it is
+			 * still only partially initialized.
+ */
+ kfree(fs_info->super_copy);
+ kfree(fs_info->super_for_commit);
+ kvfree(fs_info);
return -ENOMEM;
}
btrfs_init_fs_info(fs_info);
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index e5581dbeb4c2..c8d8e129eb4b 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -55,10 +55,6 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
} else {
m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
m->clusterofs = le16_to_cpu(di->di_clusterofs);
- if (m->clusterofs >= 1 << vi->z_lclusterbits) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
m->pblk = le32_to_cpu(di->di_u.blkaddr);
}
return 0;
@@ -240,21 +236,29 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
unsigned int lcn, bool lookahead)
{
+ struct erofs_inode *vi = EROFS_I(m->inode);
+ int err;
+
+ if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT) {
+ err = z_erofs_load_compact_lcluster(m, lcn, lookahead);
+ } else {
+ DBG_BUGON(vi->datalayout != EROFS_INODE_COMPRESSED_FULL);
+ err = z_erofs_load_full_lcluster(m, lcn);
+ }
+ if (err)
+ return err;
+
if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) {
erofs_err(m->inode->i_sb, "unknown type %u @ lcn %u of nid %llu",
- m->type, lcn, EROFS_I(m->inode)->nid);
+ m->type, lcn, EROFS_I(m->inode)->nid);
DBG_BUGON(1);
return -EOPNOTSUPP;
+ } else if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD &&
+ m->clusterofs >= (1 << vi->z_lclusterbits)) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
}
-
- switch (EROFS_I(m->inode)->datalayout) {
- case EROFS_INODE_COMPRESSED_FULL:
- return z_erofs_load_full_lcluster(m, lcn);
- case EROFS_INODE_COMPRESSED_COMPACT:
- return z_erofs_load_compact_lcluster(m, lcn, lookahead);
- default:
- return -EINVAL;
- }
+ return 0;
}
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
@@ -268,20 +272,19 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
unsigned long lcn = m->lcn - lookback_distance;
int err;
+ if (!lookback_distance)
+ break;
+
err = z_erofs_load_lcluster_from_disk(m, lcn, false);
if (err)
return err;
-
if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
lookback_distance = m->delta[0];
- if (!lookback_distance)
- break;
continue;
- } else {
- m->headtype = m->type;
- m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
- return 0;
}
+ m->headtype = m->type;
+ m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
+ return 0;
}
erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
lookback_distance, m->lcn, vi->nid);
@@ -431,13 +434,6 @@ static int z_erofs_map_blocks_fo(struct inode *inode,
end = inode->i_size;
} else {
if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
- /* m.lcn should be >= 1 if endoff < m.clusterofs */
- if (!m.lcn) {
- erofs_err(sb, "invalid logical cluster 0 at nid %llu",
- vi->nid);
- err = -EFSCORRUPTED;
- goto unmap_out;
- }
end = (m.lcn << lclusterbits) | m.clusterofs;
map->m_flags |= EROFS_MAP_FULL_MAPPED;
m.delta[0] = 1;
@@ -596,7 +592,7 @@ static int z_erofs_map_blocks_ext(struct inode *inode,
vi->z_fragmentoff = map->m_plen;
if (recsz > offsetof(struct z_erofs_extent, pstart_lo))
vi->z_fragmentoff |= map->m_pa << 32;
- } else if (map->m_plen) {
+ } else if (map->m_plen & Z_EROFS_EXTENT_PLEN_MASK) {
map->m_flags |= EROFS_MAP_MAPPED |
EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED;
fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT;
@@ -715,6 +711,7 @@ static int z_erofs_map_sanity_check(struct inode *inode,
struct erofs_map_blocks *map)
{
struct erofs_sb_info *sbi = EROFS_I_SB(inode);
+ u64 pend;
if (!(map->m_flags & EROFS_MAP_ENCODED))
return 0;
@@ -732,6 +729,10 @@ static int z_erofs_map_sanity_check(struct inode *inode,
if (unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
return -EOPNOTSUPP;
+ /* Filesystems beyond 48-bit physical block addresses are invalid */
+ if (unlikely(check_add_overflow(map->m_pa, map->m_plen, &pend) ||
+ (pend >> sbi->blkszbits) >= BIT_ULL(48)))
+ return -EFSCORRUPTED;
return 0;
}
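
The new check in z_erofs_map_sanity_check() rejects extents whose end would either wrap around or need more than 48 bits of physical block address. A standalone sketch of that bound check, using the compiler's __builtin_add_overflow() in place of the kernel's check_add_overflow() and an arbitrary 4KiB block size:

/* Standalone illustration of the 48-bit physical address bound check. */
#include <stdint.h>
#include <stdio.h>

static int range_fits_48bit(uint64_t pa, uint64_t plen, unsigned int blkszbits)
{
	uint64_t pend;

	/* Reject if pa + plen wraps around ... */
	if (__builtin_add_overflow(pa, plen, &pend))
		return 0;
	/* ... or if the end block number needs more than 48 bits. */
	if ((pend >> blkszbits) >= (1ULL << 48))
		return 0;
	return 1;
}

int main(void)
{
	/* 4KiB blocks (blkszbits = 12): one extent well in range, one
	 * whose end block exceeds the 48-bit block address space. */
	printf("%d\n", range_fits_48bit(1ULL << 20, 4096, 12));	/* 1 */
	printf("%d\n", range_fits_48bit((1ULL << 60) - 1, 4096, 12));	/* 0 */
	return 0;
}
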
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 9c94ed8c3ab0..f42548ee9083 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -478,14 +478,6 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
if (!hugetlb_vma_trylock_write(vma))
continue;
- /*
- * Skip VMAs without shareable locks. Per the design in commit
- * 40549ba8f8e0, these will be handled by remove_inode_hugepages()
- * called after this function with proper locking.
- */
- if (!__vma_shareable_lock(vma))
- goto skip;
-
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
@@ -496,7 +488,6 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
* vmas. Therefore, lock is not held when calling
* unmap_hugepage_range for private vmas.
*/
-skip:
hugetlb_vma_unlock_write(vma);
}
}
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 1161eabf11ee..9cc7eb863643 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -17,6 +17,7 @@
#include "fanotify/fanotify.h"
#include "fdinfo.h"
#include "fsnotify.h"
+#include "../internal.h"
#if defined(CONFIG_PROC_FS)
@@ -46,7 +47,12 @@ static void show_mark_fhandle(struct seq_file *m, struct inode *inode)
size = f->handle_bytes >> 2;
+ if (!super_trylock_shared(inode->i_sb))
+ return;
+
ret = exportfs_encode_fid(inode, (struct fid *)f->f_handle, &size);
+ up_read(&inode->i_sb->s_umount);
+
if ((ret == FILEID_INVALID) || (ret < 0))
return;
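
show_mark_fhandle() now only encodes the file handle if it can take the superblock lock without blocking, and drops s_umount right after the call. A userspace analogy of that trylock-or-skip pattern, with pthread_rwlock_tryrdlock() standing in for super_trylock_shared() and a hypothetical encode() helper:

/* Userspace analogy of "trylock, do the work, unlock, or silently skip". */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t sb_lock = PTHREAD_RWLOCK_INITIALIZER;

static int encode(void)
{
	/* Stand-in for exportfs_encode_fid(); must run with the lock held. */
	return 42;
}

static void show_handle(void)
{
	int ret;

	if (pthread_rwlock_tryrdlock(&sb_lock) != 0)
		return;		/* held exclusively elsewhere: skip output */

	ret = encode();
	pthread_rwlock_unlock(&sb_lock);

	if (ret < 0)
		return;
	printf("handle=%d\n", ret);
}

int main(void)
{
	show_handle();
	return 0;
}
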
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 86f2631e6360..10923bf7c8b8 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -867,6 +867,11 @@ static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
mlog_errno(ret);
goto out;
}
+ /*
+ * Invalidate extent cache after moving/defragging to prevent
+ * stale cached data with outdated extent flags.
+ */
+ ocfs2_extent_map_trunc(inode, cpos);
context->clusters_moved += alloc_size;
next:
diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c
index 4076336fbba6..572a9925bd6c 100644
--- a/fs/resctrl/monitor.c
+++ b/fs/resctrl/monitor.c
@@ -1782,15 +1782,13 @@ int resctrl_mon_resource_init(void)
mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
if (r->mon.mbm_cntr_assignable) {
- if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
- resctrl_enable_mon_event(QOS_L3_MBM_TOTAL_EVENT_ID);
- if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
- resctrl_enable_mon_event(QOS_L3_MBM_LOCAL_EVENT_ID);
- mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
- mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
- (READS_TO_LOCAL_MEM |
- READS_TO_LOCAL_S_MEM |
- NON_TEMP_WRITE_TO_LOCAL_MEM);
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID))
+ mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask;
+ if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID))
+ mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask &
+ (READS_TO_LOCAL_MEM |
+ READS_TO_LOCAL_S_MEM |
+ NON_TEMP_WRITE_TO_LOCAL_MEM);
r->mon.mbm_assign_on_mkdir = true;
resctrl_file_fflags_init("num_mbm_cntrs",
RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 16a00a61fd2c..203e2aaa3c25 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -534,8 +534,6 @@ struct smb_version_operations {
void (*new_lease_key)(struct cifs_fid *);
int (*generate_signingkey)(struct cifs_ses *ses,
struct TCP_Server_Info *server);
- int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *,
- bool allocate_crypto);
int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon,
struct cifsFileInfo *src_file);
int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon,
@@ -732,7 +730,7 @@ struct TCP_Server_Info {
bool nosharesock;
bool tcp_nodelay;
bool terminate;
- unsigned int credits; /* send no more requests at once */
+ int credits; /* send no more requests at once */
unsigned int max_credits; /* can override large 32000 default at mnt */
unsigned int in_flight; /* number of requests on the wire to server */
unsigned int max_in_flight; /* max number of requests that were on wire */
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 4976be2c47c1..fb1813cbe0eb 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -9,6 +9,7 @@
#define _CIFSPROTO_H
#include <linux/nls.h>
#include <linux/ctype.h>
+#include "cifsglob.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 2881efcbe09a..7da194f29fef 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -1311,6 +1311,8 @@ cifs_readv_callback(struct mid_q_entry *mid)
.rreq_debug_id = rdata->rreq->debug_id,
.rreq_debug_index = rdata->subreq.debug_index,
};
+ unsigned int rreq_debug_id = rdata->rreq->debug_id;
+ unsigned int subreq_debug_index = rdata->subreq.debug_index;
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n",
__func__, mid->mid, mid->mid_state, rdata->result,
@@ -1374,6 +1376,9 @@ do_retry:
__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
}
+ trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
+ server->credits, server->in_flight,
+ 0, cifs_trace_rw_credits_read_response_clear);
rdata->credits.value = 0;
rdata->subreq.error = rdata->result;
rdata->subreq.transferred += rdata->got_bytes;
@@ -1381,6 +1386,9 @@ do_retry:
netfs_read_subreq_terminated(&rdata->subreq);
release_mid(mid);
add_credits(server, &credits, 0);
+ trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
+ server->credits, server->in_flight,
+ credits.value, cifs_trace_rw_credits_read_response_add);
}
/* cifs_async_readv - send an async write, and set up mid to handle result */
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 098a79b7a959..cac355364e43 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -2484,11 +2484,8 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
do_rename_exit:
- if (rc == 0) {
+ if (rc == 0)
d_move(from_dentry, to_dentry);
- /* Force a new lookup */
- d_drop(from_dentry);
- }
cifs_put_tlink(tlink);
return rc;
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 95cd484cfbba..0f9130ef2e7d 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -5446,7 +5446,6 @@ struct smb_version_operations smb20_operations = {
.get_lease_key = smb2_get_lease_key,
.set_lease_key = smb2_set_lease_key,
.new_lease_key = smb2_new_lease_key,
- .calc_signature = smb2_calc_signature,
.is_read_op = smb2_is_read_op,
.set_oplock_level = smb2_set_oplock_level,
.create_lease_buf = smb2_create_lease_buf,
@@ -5550,7 +5549,6 @@ struct smb_version_operations smb21_operations = {
.get_lease_key = smb2_get_lease_key,
.set_lease_key = smb2_set_lease_key,
.new_lease_key = smb2_new_lease_key,
- .calc_signature = smb2_calc_signature,
.is_read_op = smb21_is_read_op,
.set_oplock_level = smb21_set_oplock_level,
.create_lease_buf = smb2_create_lease_buf,
@@ -5660,7 +5658,6 @@ struct smb_version_operations smb30_operations = {
.set_lease_key = smb2_set_lease_key,
.new_lease_key = smb2_new_lease_key,
.generate_signingkey = generate_smb30signingkey,
- .calc_signature = smb3_calc_signature,
.set_integrity = smb3_set_integrity,
.is_read_op = smb21_is_read_op,
.set_oplock_level = smb3_set_oplock_level,
@@ -5777,7 +5774,6 @@ struct smb_version_operations smb311_operations = {
.set_lease_key = smb2_set_lease_key,
.new_lease_key = smb2_new_lease_key,
.generate_signingkey = generate_smb311signingkey,
- .calc_signature = smb3_calc_signature,
.set_integrity = smb3_set_integrity,
.is_read_op = smb21_is_read_op,
.set_oplock_level = smb3_set_oplock_level,
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index 6eb86d134abc..5241daaae543 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -39,12 +39,6 @@ extern struct mid_q_entry *smb2_setup_async_request(
struct TCP_Server_Info *server, struct smb_rqst *rqst);
extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
__u64 ses_id, __u32 tid);
-extern int smb2_calc_signature(struct smb_rqst *rqst,
- struct TCP_Server_Info *server,
- bool allocate_crypto);
-extern int smb3_calc_signature(struct smb_rqst *rqst,
- struct TCP_Server_Info *server,
- bool allocate_crypto);
extern void smb2_echo_request(struct work_struct *work);
extern __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode);
extern bool smb2_is_valid_oplock_break(char *buffer,
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index ad6068e17a2a..6a9b80385b86 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -209,9 +209,9 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
return tcon;
}
-int
+static int
smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
- bool allocate_crypto)
+ bool allocate_crypto)
{
int rc;
unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
@@ -465,9 +465,9 @@ generate_smb311signingkey(struct cifs_ses *ses,
return generate_smb3signingkey(ses, server, &triplet);
}
-int
+static int
smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
- bool allocate_crypto)
+ bool allocate_crypto)
{
int rc;
unsigned char smb3_signature[SMB2_CMACAES_SIZE];
@@ -477,6 +477,9 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
struct smb_rqst drqst;
u8 key[SMB3_SIGN_KEY_SIZE];
+ if (server->vals->protocol_id <= SMB21_PROT_ID)
+ return smb2_calc_signature(rqst, server, allocate_crypto);
+
rc = smb3_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
if (unlikely(rc)) {
cifs_server_dbg(FYI, "%s: Could not get signing key\n", __func__);
@@ -547,7 +550,6 @@ out:
static int
smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
- int rc = 0;
struct smb2_hdr *shdr;
struct smb2_sess_setup_req *ssr;
bool is_binding;
@@ -574,9 +576,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
return 0;
}
- rc = server->ops->calc_signature(rqst, server, false);
-
- return rc;
+ return smb3_calc_signature(rqst, server, false);
}
int
@@ -612,7 +612,7 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
memset(shdr->Signature, 0, SMB2_SIGNATURE_SIZE);
- rc = server->ops->calc_signature(rqst, server, true);
+ rc = smb3_calc_signature(rqst, server, true);
if (rc)
return rc;
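
With the calc_signature ops pointer removed, smb2_sign_rqst() and smb2_verify_signature() always call smb3_calc_signature(), which now falls back to the SMB 2.x routine for dialects at or below 2.1. The shape of the change is a plain version-gated dispatch; a hypothetical standalone sketch (invented dialect constants and signing stubs, not the cifs.ko code):

/* Standalone sketch of version-gated dispatch replacing an ops pointer. */
#include <stdio.h>

enum dialect { SMB20 = 0x0202, SMB21 = 0x0210, SMB30 = 0x0300, SMB311 = 0x0311 };

static int sign_hmac_sha256(const char *buf)	/* SMB 2.0/2.1 style */
{
	printf("HMAC-SHA256 signing: %s\n", buf);
	return 0;
}

static int sign_cmac_aes(const char *buf)	/* SMB 3.x style */
{
	printf("AES-CMAC signing: %s\n", buf);
	return 0;
}

/* Single entry point: older dialects are routed to the older algorithm,
 * so callers no longer need a per-dialect function pointer. */
static int calc_signature(enum dialect d, const char *buf)
{
	if (d <= SMB21)
		return sign_hmac_sha256(buf);
	return sign_cmac_aes(buf);
}

int main(void)
{
	calc_signature(SMB21, "request A");
	calc_signature(SMB311, "request B");
	return 0;
}
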
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index 49e2df3ad1f0..85a4c55b61b8 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -172,6 +172,7 @@ static void smbd_disconnect_wake_up_all(struct smbdirect_socket *sc)
* in order to notice the broken connection.
*/
wake_up_all(&sc->status_wait);
+ wake_up_all(&sc->send_io.lcredits.wait_queue);
wake_up_all(&sc->send_io.credits.wait_queue);
wake_up_all(&sc->send_io.pending.dec_wait_queue);
wake_up_all(&sc->send_io.pending.zero_wait_queue);
@@ -495,6 +496,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
struct smbdirect_send_io *request =
container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
struct smbdirect_socket *sc = request->socket;
+ int lcredits = 0;
log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%s\n",
request, ib_wc_status_msg(wc->status));
@@ -504,22 +506,24 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
request->sge[i].addr,
request->sge[i].length,
DMA_TO_DEVICE);
+ mempool_free(request, sc->send_io.mem.pool);
+ lcredits += 1;
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
log_rdma_send(ERR, "wc->status=%s wc->opcode=%d\n",
ib_wc_status_msg(wc->status), wc->opcode);
- mempool_free(request, sc->send_io.mem.pool);
smbd_disconnect_rdma_connection(sc);
return;
}
+ atomic_add(lcredits, &sc->send_io.lcredits.count);
+ wake_up(&sc->send_io.lcredits.wait_queue);
+
if (atomic_dec_and_test(&sc->send_io.pending.count))
wake_up(&sc->send_io.pending.zero_wait_queue);
wake_up(&sc->send_io.pending.dec_wait_queue);
-
- mempool_free(request, sc->send_io.mem.pool);
}
static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp)
@@ -567,6 +571,7 @@ static bool process_negotiation_response(
log_rdma_event(ERR, "error: credits_granted==0\n");
return false;
}
+ atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target);
atomic_set(&sc->send_io.credits.count, le16_to_cpu(packet->credits_granted));
if (le32_to_cpu(packet->preferred_send_size) > sp->max_recv_size) {
@@ -1114,6 +1119,24 @@ static int smbd_post_send_iter(struct smbdirect_socket *sc,
struct smbdirect_data_transfer *packet;
int new_credits = 0;
+wait_lcredit:
+ /* Wait for local send credits */
+ rc = wait_event_interruptible(sc->send_io.lcredits.wait_queue,
+ atomic_read(&sc->send_io.lcredits.count) > 0 ||
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
+ if (rc)
+ goto err_wait_lcredit;
+
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ log_outgoing(ERR, "disconnected not sending on wait_credit\n");
+ rc = -EAGAIN;
+ goto err_wait_lcredit;
+ }
+ if (unlikely(atomic_dec_return(&sc->send_io.lcredits.count) < 0)) {
+ atomic_inc(&sc->send_io.lcredits.count);
+ goto wait_lcredit;
+ }
+
wait_credit:
/* Wait for send credits. A SMBD packet needs one credit */
rc = wait_event_interruptible(sc->send_io.credits.wait_queue,
@@ -1132,23 +1155,6 @@ wait_credit:
goto wait_credit;
}
-wait_send_queue:
- wait_event(sc->send_io.pending.dec_wait_queue,
- atomic_read(&sc->send_io.pending.count) < sp->send_credit_target ||
- sc->status != SMBDIRECT_SOCKET_CONNECTED);
-
- if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
- log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
- rc = -EAGAIN;
- goto err_wait_send_queue;
- }
-
- if (unlikely(atomic_inc_return(&sc->send_io.pending.count) >
- sp->send_credit_target)) {
- atomic_dec(&sc->send_io.pending.count);
- goto wait_send_queue;
- }
-
request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL);
if (!request) {
rc = -ENOMEM;
@@ -1229,10 +1235,21 @@ wait_send_queue:
le32_to_cpu(packet->data_length),
le32_to_cpu(packet->remaining_data_length));
+ /*
+	 * Now that we have both a local and a remote credit,
+	 * add ourselves to the pending count.
+ */
+ atomic_inc(&sc->send_io.pending.count);
+
rc = smbd_post_send(sc, request);
if (!rc)
return 0;
+ if (atomic_dec_and_test(&sc->send_io.pending.count))
+ wake_up(&sc->send_io.pending.zero_wait_queue);
+
+ wake_up(&sc->send_io.pending.dec_wait_queue);
+
err_dma:
for (i = 0; i < request->num_sge; i++)
if (request->sge[i].addr)
@@ -1246,14 +1263,14 @@ err_dma:
atomic_sub(new_credits, &sc->recv_io.credits.count);
err_alloc:
- if (atomic_dec_and_test(&sc->send_io.pending.count))
- wake_up(&sc->send_io.pending.zero_wait_queue);
-
-err_wait_send_queue:
- /* roll back send credits and pending */
atomic_inc(&sc->send_io.credits.count);
+ wake_up(&sc->send_io.credits.wait_queue);
err_wait_credit:
+ atomic_inc(&sc->send_io.lcredits.count);
+ wake_up(&sc->send_io.lcredits.wait_queue);
+
+err_wait_lcredit:
return rc;
}
@@ -1767,6 +1784,7 @@ static struct smbd_connection *_smbd_get_connection(
struct smbdirect_socket *sc;
struct smbdirect_socket_parameters *sp;
struct rdma_conn_param conn_param;
+ struct ib_qp_cap qp_cap;
struct ib_qp_init_attr qp_attr;
struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
struct ib_port_immutable port_immutable;
@@ -1838,6 +1856,25 @@ static struct smbd_connection *_smbd_get_connection(
goto config_failed;
}
+ sp->responder_resources =
+ min_t(u8, sp->responder_resources,
+ sc->ib.dev->attrs.max_qp_rd_atom);
+ log_rdma_mr(INFO, "responder_resources=%d\n",
+ sp->responder_resources);
+
+ /*
+	 * We allocate sp->responder_resources * 2 MRs,
+	 * and each MR needs WRs for REG and INV, so
+	 * we use '* 4'.
+ *
+ * +1 for ib_drain_qp()
+ */
+ memset(&qp_cap, 0, sizeof(qp_cap));
+ qp_cap.max_send_wr = sp->send_credit_target + sp->responder_resources * 4 + 1;
+ qp_cap.max_recv_wr = sp->recv_credit_max + 1;
+ qp_cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
+ qp_cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
+
sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
if (IS_ERR(sc->ib.pd)) {
rc = PTR_ERR(sc->ib.pd);
@@ -1848,7 +1885,7 @@ static struct smbd_connection *_smbd_get_connection(
sc->ib.send_cq =
ib_alloc_cq_any(sc->ib.dev, sc,
- sp->send_credit_target, IB_POLL_SOFTIRQ);
+ qp_cap.max_send_wr, IB_POLL_SOFTIRQ);
if (IS_ERR(sc->ib.send_cq)) {
sc->ib.send_cq = NULL;
goto alloc_cq_failed;
@@ -1856,7 +1893,7 @@ static struct smbd_connection *_smbd_get_connection(
sc->ib.recv_cq =
ib_alloc_cq_any(sc->ib.dev, sc,
- sp->recv_credit_max, IB_POLL_SOFTIRQ);
+ qp_cap.max_recv_wr, IB_POLL_SOFTIRQ);
if (IS_ERR(sc->ib.recv_cq)) {
sc->ib.recv_cq = NULL;
goto alloc_cq_failed;
@@ -1865,11 +1902,7 @@ static struct smbd_connection *_smbd_get_connection(
memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.event_handler = smbd_qp_async_error_upcall;
qp_attr.qp_context = sc;
- qp_attr.cap.max_send_wr = sp->send_credit_target;
- qp_attr.cap.max_recv_wr = sp->recv_credit_max;
- qp_attr.cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
- qp_attr.cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
- qp_attr.cap.max_inline_data = 0;
+ qp_attr.cap = qp_cap;
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
qp_attr.qp_type = IB_QPT_RC;
qp_attr.send_cq = sc->ib.send_cq;
@@ -1883,12 +1916,6 @@ static struct smbd_connection *_smbd_get_connection(
}
sc->ib.qp = sc->rdma.cm_id->qp;
- sp->responder_resources =
- min_t(u8, sp->responder_resources,
- sc->ib.dev->attrs.max_qp_rd_atom);
- log_rdma_mr(INFO, "responder_resources=%d\n",
- sp->responder_resources);
-
memset(&conn_param, 0, sizeof(conn_param));
conn_param.initiator_depth = sp->initiator_depth;
conn_param.responder_resources = sp->responder_resources;
diff --git a/fs/smb/client/trace.c b/fs/smb/client/trace.c
index 465483787193..16b0e719731f 100644
--- a/fs/smb/client/trace.c
+++ b/fs/smb/client/trace.c
@@ -4,5 +4,6 @@
*
* Author(s): Steve French <stfrench@microsoft.com>
*/
+#include "cifsglob.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
index 361db7f9f623..ee5a90d691c8 100644
--- a/fs/smb/common/smbdirect/smbdirect_socket.h
+++ b/fs/smb/common/smbdirect/smbdirect_socket.h
@@ -142,7 +142,15 @@ struct smbdirect_socket {
} mem;
/*
- * The credit state for the send side
+ * The local credit state for ib_post_send()
+ */
+ struct {
+ atomic_t count;
+ wait_queue_head_t wait_queue;
+ } lcredits;
+
+ /*
+ * The remote credit state for the send side
*/
struct {
atomic_t count;
@@ -337,6 +345,9 @@ static __always_inline void smbdirect_socket_init(struct smbdirect_socket *sc)
INIT_DELAYED_WORK(&sc->idle.timer_work, __smbdirect_socket_disabled_work);
disable_delayed_work_sync(&sc->idle.timer_work);
+ atomic_set(&sc->send_io.lcredits.count, 0);
+ init_waitqueue_head(&sc->send_io.lcredits.wait_queue);
+
atomic_set(&sc->send_io.credits.count, 0);
init_waitqueue_head(&sc->send_io.credits.wait_queue);
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index a201c5871a77..89b02efdba0c 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -219,6 +219,7 @@ static void smb_direct_disconnect_wake_up_all(struct smbdirect_socket *sc)
* in order to notice the broken connection.
*/
wake_up_all(&sc->status_wait);
+ wake_up_all(&sc->send_io.lcredits.wait_queue);
wake_up_all(&sc->send_io.credits.wait_queue);
wake_up_all(&sc->send_io.pending.zero_wait_queue);
wake_up_all(&sc->recv_io.reassembly.wait_queue);
@@ -450,11 +451,10 @@ static void free_transport(struct smb_direct_transport *t)
struct smbdirect_recv_io *recvmsg;
disable_work_sync(&sc->disconnect_work);
- if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING) {
+ if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING)
smb_direct_disconnect_rdma_work(&sc->disconnect_work);
- wait_event_interruptible(sc->status_wait,
- sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
- }
+ if (sc->status < SMBDIRECT_SOCKET_DISCONNECTED)
+ wait_event(sc->status_wait, sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
/*
* Wake up all waiters in all wait queues
@@ -471,7 +471,6 @@ static void free_transport(struct smb_direct_transport *t)
if (sc->ib.qp) {
ib_drain_qp(sc->ib.qp);
- ib_mr_pool_destroy(sc->ib.qp, &sc->ib.qp->rdma_mrs);
sc->ib.qp = NULL;
rdma_destroy_qp(sc->rdma.cm_id);
}
@@ -524,6 +523,12 @@ static void smb_direct_free_sendmsg(struct smbdirect_socket *sc,
{
int i;
+ /*
+ * The list needs to be empty!
+ * The caller should take care of it.
+ */
+ WARN_ON_ONCE(!list_empty(&msg->sibling_list));
+
if (msg->num_sge > 0) {
ib_dma_unmap_single(sc->ib.dev,
msg->sge[0].addr, msg->sge[0].length,
@@ -909,9 +914,9 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct smbdirect_send_io *sendmsg, *sibling;
+ struct smbdirect_send_io *sendmsg, *sibling, *next;
struct smbdirect_socket *sc;
- struct list_head *pos, *prev, *end;
+ int lcredits = 0;
sendmsg = container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
sc = sendmsg->socket;
@@ -920,27 +925,31 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
ib_wc_status_msg(wc->status), wc->status,
wc->opcode);
+ /*
+ * Free possible siblings and then the main send_io
+ */
+ list_for_each_entry_safe(sibling, next, &sendmsg->sibling_list, sibling_list) {
+ list_del_init(&sibling->sibling_list);
+ smb_direct_free_sendmsg(sc, sibling);
+ lcredits += 1;
+ }
+ /* Note this frees wc->wr_cqe, but not wc */
+ smb_direct_free_sendmsg(sc, sendmsg);
+ lcredits += 1;
+
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
pr_err("Send error. status='%s (%d)', opcode=%d\n",
ib_wc_status_msg(wc->status), wc->status,
wc->opcode);
smb_direct_disconnect_rdma_connection(sc);
+ return;
}
+ atomic_add(lcredits, &sc->send_io.lcredits.count);
+ wake_up(&sc->send_io.lcredits.wait_queue);
+
if (atomic_dec_and_test(&sc->send_io.pending.count))
wake_up(&sc->send_io.pending.zero_wait_queue);
-
- /* iterate and free the list of messages in reverse. the list's head
- * is invalid.
- */
- for (pos = &sendmsg->sibling_list, prev = pos->prev, end = sendmsg->sibling_list.next;
- prev != end; pos = prev, prev = prev->prev) {
- sibling = container_of(pos, struct smbdirect_send_io, sibling_list);
- smb_direct_free_sendmsg(sc, sibling);
- }
-
- sibling = container_of(pos, struct smbdirect_send_io, sibling_list);
- smb_direct_free_sendmsg(sc, sibling);
}
static int manage_credits_prior_sending(struct smbdirect_socket *sc)
@@ -988,8 +997,6 @@ static int smb_direct_post_send(struct smbdirect_socket *sc,
ret = ib_post_send(sc->ib.qp, wr, NULL);
if (ret) {
pr_err("failed to post send: %d\n", ret);
- if (atomic_dec_and_test(&sc->send_io.pending.count))
- wake_up(&sc->send_io.pending.zero_wait_queue);
smb_direct_disconnect_rdma_connection(sc);
}
return ret;
@@ -1032,19 +1039,29 @@ static int smb_direct_flush_send_list(struct smbdirect_socket *sc,
last->wr.send_flags = IB_SEND_SIGNALED;
last->wr.wr_cqe = &last->cqe;
+ /*
+ * Remove last from send_ctx->msg_list
+ * and splice the rest of send_ctx->msg_list
+ * to last->sibling_list.
+ *
+ * send_ctx->msg_list is a valid empty list
+ * at the end.
+ */
+ list_del_init(&last->sibling_list);
+ list_splice_tail_init(&send_ctx->msg_list, &last->sibling_list);
+ send_ctx->wr_cnt = 0;
+
ret = smb_direct_post_send(sc, &first->wr);
- if (!ret) {
- smb_direct_send_ctx_init(send_ctx,
- send_ctx->need_invalidate_rkey,
- send_ctx->remote_key);
- } else {
- atomic_add(send_ctx->wr_cnt, &sc->send_io.credits.count);
- wake_up(&sc->send_io.credits.wait_queue);
- list_for_each_entry_safe(first, last, &send_ctx->msg_list,
- sibling_list) {
- smb_direct_free_sendmsg(sc, first);
+ if (ret) {
+ struct smbdirect_send_io *sibling, *next;
+
+ list_for_each_entry_safe(sibling, next, &last->sibling_list, sibling_list) {
+ list_del_init(&sibling->sibling_list);
+ smb_direct_free_sendmsg(sc, sibling);
}
+ smb_direct_free_sendmsg(sc, last);
}
+
return ret;
}
@@ -1070,6 +1087,23 @@ static int wait_for_credits(struct smbdirect_socket *sc,
} while (true);
}
+static int wait_for_send_lcredit(struct smbdirect_socket *sc,
+ struct smbdirect_send_batch *send_ctx)
+{
+ if (send_ctx && (atomic_read(&sc->send_io.lcredits.count) <= 1)) {
+ int ret;
+
+ ret = smb_direct_flush_send_list(sc, send_ctx, false);
+ if (ret)
+ return ret;
+ }
+
+ return wait_for_credits(sc,
+ &sc->send_io.lcredits.wait_queue,
+ &sc->send_io.lcredits.count,
+ 1);
+}
+
static int wait_for_send_credits(struct smbdirect_socket *sc,
struct smbdirect_send_batch *send_ctx)
{
@@ -1257,9 +1291,13 @@ static int smb_direct_post_send_data(struct smbdirect_socket *sc,
int data_length;
struct scatterlist sg[SMBDIRECT_SEND_IO_MAX_SGE - 1];
+ ret = wait_for_send_lcredit(sc, send_ctx);
+ if (ret)
+ goto lcredit_failed;
+
ret = wait_for_send_credits(sc, send_ctx);
if (ret)
- return ret;
+ goto credit_failed;
data_length = 0;
for (i = 0; i < niov; i++)
@@ -1267,10 +1305,8 @@ static int smb_direct_post_send_data(struct smbdirect_socket *sc,
ret = smb_direct_create_header(sc, data_length, remaining_data_length,
&msg);
- if (ret) {
- atomic_inc(&sc->send_io.credits.count);
- return ret;
- }
+ if (ret)
+ goto header_failed;
for (i = 0; i < niov; i++) {
struct ib_sge *sge;
@@ -1308,7 +1344,11 @@ static int smb_direct_post_send_data(struct smbdirect_socket *sc,
return 0;
err:
smb_direct_free_sendmsg(sc, msg);
+header_failed:
atomic_inc(&sc->send_io.credits.count);
+credit_failed:
+ atomic_inc(&sc->send_io.lcredits.count);
+lcredit_failed:
return ret;
}
@@ -1871,20 +1911,11 @@ out_err:
return ret;
}
-static unsigned int smb_direct_get_max_fr_pages(struct smbdirect_socket *sc)
-{
- return min_t(unsigned int,
- sc->ib.dev->attrs.max_fast_reg_page_list_len,
- 256);
-}
-
-static int smb_direct_init_params(struct smbdirect_socket *sc,
- struct ib_qp_cap *cap)
+static int smb_direct_init_params(struct smbdirect_socket *sc)
{
struct smbdirect_socket_parameters *sp = &sc->parameters;
- struct ib_device *device = sc->ib.dev;
- int max_send_sges, max_rw_wrs, max_send_wrs;
- unsigned int max_sge_per_wr, wrs_per_credit;
+ int max_send_sges;
+ unsigned int maxpages;
/* need 3 more sge. because a SMB_DIRECT header, SMB2 header,
* SMB2 response could be mapped.
@@ -1895,67 +1926,20 @@ static int smb_direct_init_params(struct smbdirect_socket *sc,
return -EINVAL;
}
- /* Calculate the number of work requests for RDMA R/W.
- * The maximum number of pages which can be registered
- * with one Memory region can be transferred with one
- * R/W credit. And at least 4 work requests for each credit
- * are needed for MR registration, RDMA R/W, local & remote
- * MR invalidation.
- */
- sc->rw_io.credits.num_pages = smb_direct_get_max_fr_pages(sc);
- sc->rw_io.credits.max = DIV_ROUND_UP(sp->max_read_write_size,
- (sc->rw_io.credits.num_pages - 1) *
- PAGE_SIZE);
-
- max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge,
- device->attrs.max_sge_rd);
- max_sge_per_wr = max_t(unsigned int, max_sge_per_wr,
- max_send_sges);
- wrs_per_credit = max_t(unsigned int, 4,
- DIV_ROUND_UP(sc->rw_io.credits.num_pages,
- max_sge_per_wr) + 1);
- max_rw_wrs = sc->rw_io.credits.max * wrs_per_credit;
-
- max_send_wrs = sp->send_credit_target + max_rw_wrs;
- if (max_send_wrs > device->attrs.max_cqe ||
- max_send_wrs > device->attrs.max_qp_wr) {
- pr_err("consider lowering send_credit_target = %d\n",
- sp->send_credit_target);
- pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
- device->attrs.max_cqe, device->attrs.max_qp_wr);
- return -EINVAL;
- }
+ atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target);
- if (sp->recv_credit_max > device->attrs.max_cqe ||
- sp->recv_credit_max > device->attrs.max_qp_wr) {
- pr_err("consider lowering receive_credit_max = %d\n",
- sp->recv_credit_max);
- pr_err("Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n",
- device->attrs.max_cqe, device->attrs.max_qp_wr);
- return -EINVAL;
- }
-
- if (device->attrs.max_send_sge < SMBDIRECT_SEND_IO_MAX_SGE) {
- pr_err("warning: device max_send_sge = %d too small\n",
- device->attrs.max_send_sge);
- return -EINVAL;
- }
- if (device->attrs.max_recv_sge < SMBDIRECT_RECV_IO_MAX_SGE) {
- pr_err("warning: device max_recv_sge = %d too small\n",
- device->attrs.max_recv_sge);
- return -EINVAL;
- }
+ maxpages = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE);
+ sc->rw_io.credits.max = rdma_rw_mr_factor(sc->ib.dev,
+ sc->rdma.cm_id->port_num,
+ maxpages);
+ sc->rw_io.credits.num_pages = DIV_ROUND_UP(maxpages, sc->rw_io.credits.max);
+ /* add one extra in order to handle unaligned pages */
+ sc->rw_io.credits.max += 1;
sc->recv_io.credits.target = 1;
atomic_set(&sc->rw_io.credits.count, sc->rw_io.credits.max);
- cap->max_send_wr = max_send_wrs;
- cap->max_recv_wr = sp->recv_credit_max;
- cap->max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
- cap->max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
- cap->max_inline_data = 0;
- cap->max_rdma_ctxs = sc->rw_io.credits.max;
return 0;
}
@@ -2029,13 +2013,129 @@ err:
return -ENOMEM;
}
-static int smb_direct_create_qpair(struct smbdirect_socket *sc,
- struct ib_qp_cap *cap)
+static u32 smb_direct_rdma_rw_send_wrs(struct ib_device *dev, const struct ib_qp_init_attr *attr)
+{
+ /*
+ * This could be split out of rdma_rw_init_qp()
+ * and be a helper function next to rdma_rw_mr_factor()
+ *
+ * We can't check unlikely(rdma_rw_force_mr) here,
+ * but that is most likely 0 anyway.
+ */
+ u32 factor;
+
+ WARN_ON_ONCE(attr->port_num == 0);
+
+ /*
+ * Each context needs at least one RDMA READ or WRITE WR.
+ *
+ * For some hardware we might need more, eventually we should ask the
+ * HCA driver for a multiplier here.
+ */
+ factor = 1;
+
+ /*
+ * If the device needs MRs to perform RDMA READ or WRITE operations,
+ * we'll need two additional MRs for the registrations and the
+ * invalidation.
+ */
+ if (rdma_protocol_iwarp(dev, attr->port_num) || dev->attrs.max_sgl_rd)
+ factor += 2; /* inv + reg */
+
+ return factor * attr->cap.max_rdma_ctxs;
+}
+
+static int smb_direct_create_qpair(struct smbdirect_socket *sc)
{
struct smbdirect_socket_parameters *sp = &sc->parameters;
int ret;
+ struct ib_qp_cap qp_cap;
struct ib_qp_init_attr qp_attr;
- int pages_per_rw;
+ u32 max_send_wr;
+ u32 rdma_send_wr;
+
+ /*
+ * Note that {rdma,ib}_create_qp() will call
+ * rdma_rw_init_qp() if cap->max_rdma_ctxs is not 0.
+	 * It will increase cap->max_send_wr by the number of
+	 * additional WRs required for the RDMA RW operations and
+	 * cap cap->max_send_wr at the device limit.
+ *
+ * +1 for ib_drain_qp
+ */
+ qp_cap.max_send_wr = sp->send_credit_target + 1;
+ qp_cap.max_recv_wr = sp->recv_credit_max + 1;
+ qp_cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
+ qp_cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
+ qp_cap.max_inline_data = 0;
+ qp_cap.max_rdma_ctxs = sc->rw_io.credits.max;
+
+ /*
+	 * Find out what max_send_wr will be
+	 * after rdma_rw_init_qp() has adjusted it.
+ *
+ * We only do it on a temporary variable,
+ * as rdma_create_qp() will trigger
+ * rdma_rw_init_qp() again.
+ */
+ memset(&qp_attr, 0, sizeof(qp_attr));
+ qp_attr.cap = qp_cap;
+ qp_attr.port_num = sc->rdma.cm_id->port_num;
+ rdma_send_wr = smb_direct_rdma_rw_send_wrs(sc->ib.dev, &qp_attr);
+ max_send_wr = qp_cap.max_send_wr + rdma_send_wr;
+
+ if (qp_cap.max_send_wr > sc->ib.dev->attrs.max_cqe ||
+ qp_cap.max_send_wr > sc->ib.dev->attrs.max_qp_wr) {
+ pr_err("Possible CQE overrun: max_send_wr %d\n",
+ qp_cap.max_send_wr);
+ pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
+ IB_DEVICE_NAME_MAX,
+ sc->ib.dev->name,
+ sc->ib.dev->attrs.max_cqe,
+ sc->ib.dev->attrs.max_qp_wr);
+ pr_err("consider lowering send_credit_target = %d\n",
+ sp->send_credit_target);
+ return -EINVAL;
+ }
+
+ if (qp_cap.max_rdma_ctxs &&
+ (max_send_wr >= sc->ib.dev->attrs.max_cqe ||
+ max_send_wr >= sc->ib.dev->attrs.max_qp_wr)) {
+ pr_err("Possible CQE overrun: rdma_send_wr %d + max_send_wr %d = %d\n",
+ rdma_send_wr, qp_cap.max_send_wr, max_send_wr);
+ pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
+ IB_DEVICE_NAME_MAX,
+ sc->ib.dev->name,
+ sc->ib.dev->attrs.max_cqe,
+ sc->ib.dev->attrs.max_qp_wr);
+ pr_err("consider lowering send_credit_target = %d, max_rdma_ctxs = %d\n",
+ sp->send_credit_target, qp_cap.max_rdma_ctxs);
+ return -EINVAL;
+ }
+
+ if (qp_cap.max_recv_wr > sc->ib.dev->attrs.max_cqe ||
+ qp_cap.max_recv_wr > sc->ib.dev->attrs.max_qp_wr) {
+ pr_err("Possible CQE overrun: max_recv_wr %d\n",
+ qp_cap.max_recv_wr);
+ pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n",
+ IB_DEVICE_NAME_MAX,
+ sc->ib.dev->name,
+ sc->ib.dev->attrs.max_cqe,
+ sc->ib.dev->attrs.max_qp_wr);
+ pr_err("consider lowering receive_credit_max = %d\n",
+ sp->recv_credit_max);
+ return -EINVAL;
+ }
+
+ if (qp_cap.max_send_sge > sc->ib.dev->attrs.max_send_sge ||
+ qp_cap.max_recv_sge > sc->ib.dev->attrs.max_recv_sge) {
+ pr_err("device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
+ IB_DEVICE_NAME_MAX,
+ sc->ib.dev->name,
+ sc->ib.dev->attrs.max_send_sge,
+ sc->ib.dev->attrs.max_recv_sge);
+ return -EINVAL;
+ }
sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
if (IS_ERR(sc->ib.pd)) {
@@ -2046,8 +2146,7 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc,
}
sc->ib.send_cq = ib_alloc_cq_any(sc->ib.dev, sc,
- sp->send_credit_target +
- cap->max_rdma_ctxs,
+ max_send_wr,
IB_POLL_WORKQUEUE);
if (IS_ERR(sc->ib.send_cq)) {
pr_err("Can't create RDMA send CQ\n");
@@ -2057,7 +2156,7 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc,
}
sc->ib.recv_cq = ib_alloc_cq_any(sc->ib.dev, sc,
- sp->recv_credit_max,
+ qp_cap.max_recv_wr,
IB_POLL_WORKQUEUE);
if (IS_ERR(sc->ib.recv_cq)) {
pr_err("Can't create RDMA recv CQ\n");
@@ -2066,10 +2165,18 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc,
goto err;
}
+ /*
+	 * Reset qp_attr completely here, as the use above was
+	 * only temporary, to calculate max_send_wr and rdma_send_wr.
+ *
+ * rdma_create_qp() will trigger rdma_rw_init_qp()
+ * again if max_rdma_ctxs is not 0.
+ */
memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.event_handler = smb_direct_qpair_handler;
qp_attr.qp_context = sc;
- qp_attr.cap = *cap;
+ qp_attr.cap = qp_cap;
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
qp_attr.qp_type = IB_QPT_RC;
qp_attr.send_cq = sc->ib.send_cq;
@@ -2085,18 +2192,6 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc,
sc->ib.qp = sc->rdma.cm_id->qp;
sc->rdma.cm_id->event_handler = smb_direct_cm_handler;
- pages_per_rw = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE) + 1;
- if (pages_per_rw > sc->ib.dev->attrs.max_sgl_rd) {
- ret = ib_mr_pool_init(sc->ib.qp, &sc->ib.qp->rdma_mrs,
- sc->rw_io.credits.max, IB_MR_TYPE_MEM_REG,
- sc->rw_io.credits.num_pages, 0);
- if (ret) {
- pr_err("failed to init mr pool count %zu pages %zu\n",
- sc->rw_io.credits.max, sc->rw_io.credits.num_pages);
- goto err;
- }
- }
-
return 0;
err:
if (sc->ib.qp) {
@@ -2183,10 +2278,9 @@ out:
static int smb_direct_connect(struct smbdirect_socket *sc)
{
- struct ib_qp_cap qp_cap;
int ret;
- ret = smb_direct_init_params(sc, &qp_cap);
+ ret = smb_direct_init_params(sc);
if (ret) {
pr_err("Can't configure RDMA parameters\n");
return ret;
@@ -2198,7 +2292,7 @@ static int smb_direct_connect(struct smbdirect_socket *sc)
return ret;
}
- ret = smb_direct_create_qpair(sc, &qp_cap);
+ ret = smb_direct_create_qpair(sc);
if (ret) {
pr_err("Can't accept RDMA client: %d\n", ret);
return ret;
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 2d78e94072a0..e142bac4f9f8 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -498,17 +498,26 @@ int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
}
EXPORT_SYMBOL_GPL(compat_only_sysfs_link_entry_to_kobj);
-static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn,
+static int sysfs_group_attrs_change_owner(struct kobject *kobj,
+ struct kernfs_node *grp_kn,
const struct attribute_group *grp,
struct iattr *newattrs)
{
struct kernfs_node *kn;
- int error;
+ int error, i;
+ umode_t mode;
if (grp->attrs) {
struct attribute *const *attr;
- for (attr = grp->attrs; *attr; attr++) {
+ for (i = 0, attr = grp->attrs; *attr; i++, attr++) {
+ if (grp->is_visible) {
+ mode = grp->is_visible(kobj, *attr, i);
+ if (mode & SYSFS_GROUP_INVISIBLE)
+ break;
+ if (!mode)
+ continue;
+ }
kn = kernfs_find_and_get(grp_kn, (*attr)->name);
if (!kn)
return -ENOENT;
@@ -523,7 +532,14 @@ static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn,
if (grp->bin_attrs) {
const struct bin_attribute *const *bin_attr;
- for (bin_attr = grp->bin_attrs; *bin_attr; bin_attr++) {
+ for (i = 0, bin_attr = grp->bin_attrs; *bin_attr; i++, bin_attr++) {
+ if (grp->is_bin_visible) {
+ mode = grp->is_bin_visible(kobj, *bin_attr, i);
+ if (mode & SYSFS_GROUP_INVISIBLE)
+ break;
+ if (!mode)
+ continue;
+ }
kn = kernfs_find_and_get(grp_kn, (*bin_attr)->attr.name);
if (!kn)
return -ENOENT;
@@ -573,7 +589,7 @@ int sysfs_group_change_owner(struct kobject *kobj,
error = kernfs_setattr(grp_kn, &newattrs);
if (!error)
- error = sysfs_group_attrs_change_owner(grp_kn, grp, &newattrs);
+ error = sysfs_group_attrs_change_owner(kobj, grp_kn, grp, &newattrs);
kernfs_put(grp_kn);
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 8930d5254e1d..b99da294e9a3 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -119,6 +119,15 @@ config XFS_RT
See the xfs man page in section 5 for additional information.
+ This option is mandatory to support zoned block devices. For these
+ devices, the realtime subvolume must be backed by a zoned block
+ device and a regular block device used as the main device (for
+ metadata). If the zoned block device is a host-managed SMR hard-disk
+ containing conventional zones at the beginning of its address space,
+ XFS will use the disk conventional zones as the main device and the
+ remaining sequential write required zones as the backing storage for
+ the realtime subvolume.
+
If unsure, say N.
config XFS_DRAIN_INTENTS
@@ -156,7 +165,7 @@ config XFS_ONLINE_SCRUB_STATS
bool "XFS online metadata check usage data collection"
default y
depends on XFS_ONLINE_SCRUB
- select DEBUG_FS
+ depends on DEBUG_FS
help
If you say Y here, the kernel will gather usage data about
the online metadata check subsystem. This includes the number
diff --git a/fs/xfs/scrub/nlinks.c b/fs/xfs/scrub/nlinks.c
index 26721fab5cab..091c79e432e5 100644
--- a/fs/xfs/scrub/nlinks.c
+++ b/fs/xfs/scrub/nlinks.c
@@ -376,6 +376,36 @@ out_incomplete:
return error;
}
+static uint
+xchk_nlinks_ilock_dir(
+ struct xfs_inode *ip)
+{
+ uint lock_mode = XFS_ILOCK_SHARED;
+
+ /*
+ * We're going to scan the directory entries, so we must be ready to
+ * pull the data fork mappings into memory if they aren't already.
+ */
+ if (xfs_need_iread_extents(&ip->i_df))
+ lock_mode = XFS_ILOCK_EXCL;
+
+ /*
+ * We're going to scan the parent pointers, so we must be ready to
+ * pull the attr fork mappings into memory if they aren't already.
+ */
+ if (xfs_has_parent(ip->i_mount) && xfs_inode_has_attr_fork(ip) &&
+ xfs_need_iread_extents(&ip->i_af))
+ lock_mode = XFS_ILOCK_EXCL;
+
+ /*
+ * Take the IOLOCK so that other threads cannot start a directory
+ * update while we're scanning.
+ */
+ lock_mode |= XFS_IOLOCK_SHARED;
+ xfs_ilock(ip, lock_mode);
+ return lock_mode;
+}
+
/* Walk a directory to bump the observed link counts of the children. */
STATIC int
xchk_nlinks_collect_dir(
@@ -394,8 +424,7 @@ xchk_nlinks_collect_dir(
return 0;
/* Prevent anyone from changing this directory while we walk it. */
- xfs_ilock(dp, XFS_IOLOCK_SHARED);
- lock_mode = xfs_ilock_data_map_shared(dp);
+ lock_mode = xchk_nlinks_ilock_dir(dp);
/*
* The dotdot entry of an unlinked directory still points to the last
@@ -452,7 +481,6 @@ out_abort:
xchk_iscan_abort(&xnc->collect_iscan);
out_unlock:
xfs_iunlock(dp, lock_mode);
- xfs_iunlock(dp, XFS_IOLOCK_SHARED);
return error;
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 773d959965dc..47edf3041631 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1751,7 +1751,7 @@ xfs_init_buftarg(
const char *descr)
{
/* The maximum size of the buftarg is only known once the sb is read. */
- btp->bt_nr_sectors = (xfs_daddr_t)-1;
+ btp->bt_nr_sectors = XFS_BUF_DADDR_MAX;
/* Set up device logical sector size mask */
btp->bt_logical_sectorsize = logical_sectorsize;
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 8fa7bdf59c91..e25cd2a160f3 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -22,6 +22,7 @@ extern struct kmem_cache *xfs_buf_cache;
*/
struct xfs_buf;
+#define XFS_BUF_DADDR_MAX ((xfs_daddr_t) S64_MAX)
#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
#define XBF_READ (1u << 0) /* buffer intended for reading from device */
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index f046d1215b04..b871dfde372b 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -236,7 +236,6 @@ typedef struct xfs_mount {
bool m_update_sb; /* sb needs update in mount */
unsigned int m_max_open_zones;
unsigned int m_zonegc_low_space;
- struct xfs_mru_cache *m_zone_cache; /* Inode to open zone cache */
/* max_atomic_write mount option value */
unsigned long long m_awu_max_bytes;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index e85a156dc17d..1067ebb3b001 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -102,7 +102,7 @@ static const struct constant_table dax_param_enums[] = {
* Table driven mount option parser.
*/
enum {
- Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
+ Op_deprecated, Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32,
@@ -114,7 +114,21 @@ enum {
Opt_lifetime, Opt_nolifetime, Opt_max_atomic_write,
};
+#define fsparam_dead(NAME) \
+ __fsparam(NULL, (NAME), Op_deprecated, fs_param_deprecated, NULL)
+
static const struct fs_parameter_spec xfs_fs_parameters[] = {
+ /*
+ * These mount options were supposed to be deprecated in September 2025
+ * but the deprecation warning was buggy, so not all users were
+ * notified. The deprecation is now obnoxiously loud and postponed to
+ * September 2030.
+ */
+ fsparam_dead("attr2"),
+ fsparam_dead("noattr2"),
+ fsparam_dead("ikeep"),
+ fsparam_dead("noikeep"),
+
fsparam_u32("logbufs", Opt_logbufs),
fsparam_string("logbsize", Opt_logbsize),
fsparam_string("logdev", Opt_logdev),
@@ -786,6 +800,12 @@ xfs_fs_evict_inode(
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
+
+ if (IS_ENABLED(CONFIG_XFS_RT) &&
+ S_ISREG(inode->i_mode) && inode->i_private) {
+ xfs_open_zone_put(inode->i_private);
+ inode->i_private = NULL;
+ }
}
static void
@@ -1373,16 +1393,25 @@ suffix_kstrtoull(
static inline void
xfs_fs_warn_deprecated(
struct fs_context *fc,
- struct fs_parameter *param,
- uint64_t flag,
- bool value)
+ struct fs_parameter *param)
{
- /* Don't print the warning if reconfiguring and current mount point
- * already had the flag set
+ /*
+ * Always warn about someone passing in a deprecated mount option.
+ * Previously we wouldn't print the warning if we were reconfiguring
+ * and current mount point already had the flag set, but that was not
+ * the right thing to do.
+ *
+ * Many distributions mount the root filesystem with no options in the
+ * initramfs and rely on mount -a to remount the root fs with the
+ * options in fstab. However, the old behavior meant that there would
+ * never be a warning about deprecated mount options for the root fs in
+ * /etc/fstab. On a single-fs system, that means no warning at all.
+ *
+ * Compounding this problem are distribution scripts that copy
+ * /proc/mounts to fstab, which means that we can't remove mount
+ * options unless we're 100% sure they have only ever been advertised
+ * in /proc/mounts in response to explicitly provided mount options.
*/
- if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
- !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
- return;
xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}
@@ -1408,6 +1437,9 @@ xfs_fs_parse_param(
return opt;
switch (opt) {
+ case Op_deprecated:
+ xfs_fs_warn_deprecated(fc, param);
+ return 0;
case Opt_logbufs:
parsing_mp->m_logbufs = result.uint_32;
return 0;
@@ -1528,7 +1560,6 @@ xfs_fs_parse_param(
xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
return 0;
#endif
- /* Following mount options will be removed in September 2025 */
case Opt_max_open_zones:
parsing_mp->m_max_open_zones = result.uint_32;
return 0;
@@ -2221,7 +2252,7 @@ xfs_init_fs_context(
struct xfs_mount *mp;
int i;
- mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
+ mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
if (!mp)
return -ENOMEM;
diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c
index 1147bacb2da8..23cdab4515bb 100644
--- a/fs/xfs/xfs_zone_alloc.c
+++ b/fs/xfs/xfs_zone_alloc.c
@@ -26,14 +26,22 @@
#include "xfs_trace.h"
#include "xfs_mru_cache.h"
+static void
+xfs_open_zone_free_rcu(
+ struct callback_head *cb)
+{
+ struct xfs_open_zone *oz = container_of(cb, typeof(*oz), oz_rcu);
+
+ xfs_rtgroup_rele(oz->oz_rtg);
+ kfree(oz);
+}
+
void
xfs_open_zone_put(
struct xfs_open_zone *oz)
{
- if (atomic_dec_and_test(&oz->oz_ref)) {
- xfs_rtgroup_rele(oz->oz_rtg);
- kfree(oz);
- }
+ if (atomic_dec_and_test(&oz->oz_ref))
+ call_rcu(&oz->oz_rcu, xfs_open_zone_free_rcu);
}
static inline uint32_t
@@ -614,14 +622,25 @@ static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip)
}
/*
- * Try to pack inodes that are written back after they were closed tight instead
- * of trying to open new zones for them or spread them to the least recently
- * used zone. This optimizes the data layout for workloads that untar or copy
- * a lot of small files. Right now this does not separate multiple such
+ * Try to tightly pack small files that are written back after they were closed
+ * instead of trying to open new zones for them or spread them to the least
+ * recently used zone. This optimizes the data layout for workloads that untar
+ * or copy a lot of small files. Right now this does not separate multiple such
* streams.
*/
static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip)
{
+ struct xfs_mount *mp = ip->i_mount;
+ size_t zone_capacity =
+ XFS_FSB_TO_B(mp, mp->m_groups[XG_TYPE_RTG].blocks);
+
+ /*
+	 * Do not tightly pack files that already fill an entire zone, to avoid
+	 * fragmentation.
+ */
+ if (i_size_read(VFS_I(ip)) >= zone_capacity)
+ return false;
+
return !inode_is_open_for_write(VFS_I(ip)) &&
!(ip->i_diflags & XFS_DIFLAG_APPEND);
}
@@ -746,97 +765,54 @@ xfs_mark_rtg_boundary(
}
/*
- * Cache the last zone written to for an inode so that it is considered first
- * for subsequent writes.
- */
-struct xfs_zone_cache_item {
- struct xfs_mru_cache_elem mru;
- struct xfs_open_zone *oz;
-};
-
-static inline struct xfs_zone_cache_item *
-xfs_zone_cache_item(struct xfs_mru_cache_elem *mru)
-{
- return container_of(mru, struct xfs_zone_cache_item, mru);
-}
-
-static void
-xfs_zone_cache_free_func(
- void *data,
- struct xfs_mru_cache_elem *mru)
-{
- struct xfs_zone_cache_item *item = xfs_zone_cache_item(mru);
-
- xfs_open_zone_put(item->oz);
- kfree(item);
-}
-
-/*
* Check if we have a cached last open zone available for the inode and
* if yes return a reference to it.
*/
static struct xfs_open_zone *
-xfs_cached_zone(
- struct xfs_mount *mp,
- struct xfs_inode *ip)
+xfs_get_cached_zone(
+ struct xfs_inode *ip)
{
- struct xfs_mru_cache_elem *mru;
- struct xfs_open_zone *oz;
+ struct xfs_open_zone *oz;
- mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
- if (!mru)
- return NULL;
- oz = xfs_zone_cache_item(mru)->oz;
+ rcu_read_lock();
+ oz = VFS_I(ip)->i_private;
if (oz) {
/*
* GC only steals open zones at mount time, so no GC zones
* should end up in the cache.
*/
ASSERT(!oz->oz_is_gc);
- ASSERT(atomic_read(&oz->oz_ref) > 0);
- atomic_inc(&oz->oz_ref);
+ if (!atomic_inc_not_zero(&oz->oz_ref))
+ oz = NULL;
}
- xfs_mru_cache_done(mp->m_zone_cache);
+ rcu_read_unlock();
+
return oz;
}
/*
- * Update the last used zone cache for a given inode.
+ * Stash our zone in the inode so that it is reused for future allocations.
*
- * The caller must have a reference on the open zone.
+ * The open_zone structure will be pinned until either the inode is freed or
+ * until the cached open zone is replaced with a different one because the
+ * current one was full when we tried to use it. This means an open zone
+ * can stay around for as long as any inode that last wrote to it remains
+ * cached, which slightly increases the memory use of cached inodes that
+ * were ever written to, but significantly simplifies the cached zone
+ * lookup. Because the open_zone is clearly marked as full when all data
+ * in the underlying RTG was written, the caching is always safe.
*/
static void
-xfs_zone_cache_create_association(
- struct xfs_inode *ip,
- struct xfs_open_zone *oz)
+xfs_set_cached_zone(
+ struct xfs_inode *ip,
+ struct xfs_open_zone *oz)
{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_zone_cache_item *item = NULL;
- struct xfs_mru_cache_elem *mru;
+ struct xfs_open_zone *old_oz;
- ASSERT(atomic_read(&oz->oz_ref) > 0);
atomic_inc(&oz->oz_ref);
-
- mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
- if (mru) {
- /*
- * If we have an association already, update it to point to the
- * new zone.
- */
- item = xfs_zone_cache_item(mru);
- xfs_open_zone_put(item->oz);
- item->oz = oz;
- xfs_mru_cache_done(mp->m_zone_cache);
- return;
- }
-
- item = kmalloc(sizeof(*item), GFP_KERNEL);
- if (!item) {
- xfs_open_zone_put(oz);
- return;
- }
- item->oz = oz;
- xfs_mru_cache_insert(mp->m_zone_cache, ip->i_ino, &item->mru);
+ old_oz = xchg(&VFS_I(ip)->i_private, oz);
+ if (old_oz)
+ xfs_open_zone_put(old_oz);
}
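
For reference, a reduced sketch of the lookup/stash pattern introduced above, using hypothetical cached_obj/cached_obj_* names rather than the XFS types: the reader takes a reference only under rcu_read_lock() with atomic_inc_not_zero(), the writer swaps the cached pointer with xchg(), and the final put defers the free through call_rcu() so readers never touch freed memory.

#include <linux/atomic.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cached_obj {
	atomic_t	ref;
	struct rcu_head	rcu;
};

static void cached_obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct cached_obj, rcu));
}

static void cached_obj_put(struct cached_obj *obj)
{
	/* Defer the actual free so concurrent RCU readers stay safe. */
	if (atomic_dec_and_test(&obj->ref))
		call_rcu(&obj->rcu, cached_obj_free_rcu);
}

static struct cached_obj *cached_obj_get(struct inode *inode)
{
	struct cached_obj *obj;

	rcu_read_lock();
	obj = inode->i_private;
	/* Only take a reference if the object has not already died. */
	if (obj && !atomic_inc_not_zero(&obj->ref))
		obj = NULL;
	rcu_read_unlock();
	return obj;
}

static void cached_obj_set(struct inode *inode, struct cached_obj *obj)
{
	struct cached_obj *old;

	atomic_inc(&obj->ref);			/* reference now owned by the cache */
	old = xchg(&inode->i_private, obj);	/* atomically replace the cached pointer */
	if (old)
		cached_obj_put(old);		/* drop the reference the cache held */
}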
static void
@@ -880,15 +856,14 @@ xfs_zone_alloc_and_submit(
* the inode is still associated with a zone and use that if so.
*/
if (!*oz)
- *oz = xfs_cached_zone(mp, ip);
+ *oz = xfs_get_cached_zone(ip);
if (!*oz) {
select_zone:
*oz = xfs_select_zone(mp, write_hint, pack_tight);
if (!*oz)
goto out_error;
-
- xfs_zone_cache_create_association(ip, *oz);
+ xfs_set_cached_zone(ip, *oz);
}
alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
@@ -966,6 +941,12 @@ xfs_free_open_zones(
xfs_open_zone_put(oz);
}
spin_unlock(&zi->zi_open_zones_lock);
+
+ /*
+ * Wait for all open zones to be freed so that they drop the group
+ * references:
+ */
+ rcu_barrier();
}
struct xfs_init_zones {
@@ -1279,14 +1260,6 @@ xfs_mount_zones(
error = xfs_zone_gc_mount(mp);
if (error)
goto out_free_zone_info;
-
- /*
- * Set up a mru cache to track inode to open zone for data placement
- * purposes. The magic values for group count and life time is the
- * same as the defaults for file streams, which seems sane enough.
- */
- xfs_mru_cache_create(&mp->m_zone_cache, mp,
- 5000, 10, xfs_zone_cache_free_func);
return 0;
out_free_zone_info:
@@ -1300,5 +1273,4 @@ xfs_unmount_zones(
{
xfs_zone_gc_unmount(mp);
xfs_free_zone_info(mp->m_zone_info);
- xfs_mru_cache_destroy(mp->m_zone_cache);
}
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index 064cd1a857a0..109877d9a6bf 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -491,21 +491,6 @@ xfs_zone_gc_select_victim(
struct xfs_rtgroup *victim_rtg = NULL;
unsigned int bucket;
- if (xfs_is_shutdown(mp))
- return false;
-
- if (iter->victim_rtg)
- return true;
-
- /*
- * Don't start new work if we are asked to stop or park.
- */
- if (kthread_should_stop() || kthread_should_park())
- return false;
-
- if (!xfs_zoned_need_gc(mp))
- return false;
-
spin_lock(&zi->zi_used_buckets_lock);
for (bucket = 0; bucket < XFS_ZONE_USED_BUCKETS; bucket++) {
victim_rtg = xfs_zone_gc_pick_victim_from(mp, bucket);
@@ -975,6 +960,27 @@ xfs_zone_gc_reset_zones(
} while (next);
}
+static bool
+xfs_zone_gc_should_start_new_work(
+ struct xfs_zone_gc_data *data)
+{
+ if (xfs_is_shutdown(data->mp))
+ return false;
+ if (!xfs_zone_gc_space_available(data))
+ return false;
+
+ if (!data->iter.victim_rtg) {
+ if (kthread_should_stop() || kthread_should_park())
+ return false;
+ if (!xfs_zoned_need_gc(data->mp))
+ return false;
+ if (!xfs_zone_gc_select_victim(data))
+ return false;
+ }
+
+ return true;
+}
+
/*
* Handle the work to read and write data for GC and to reset the zones,
* including handling all completions.
@@ -982,7 +988,7 @@ xfs_zone_gc_reset_zones(
* Note that the order of the chunks is preserved so that we don't undo the
* optimal order established by xfs_zone_gc_query().
*/
-static bool
+static void
xfs_zone_gc_handle_work(
struct xfs_zone_gc_data *data)
{
@@ -996,30 +1002,22 @@ xfs_zone_gc_handle_work(
zi->zi_reset_list = NULL;
spin_unlock(&zi->zi_reset_list_lock);
- if (!xfs_zone_gc_select_victim(data) ||
- !xfs_zone_gc_space_available(data)) {
- if (list_empty(&data->reading) &&
- list_empty(&data->writing) &&
- list_empty(&data->resetting) &&
- !reset_list)
- return false;
- }
-
- __set_current_state(TASK_RUNNING);
- try_to_freeze();
-
- if (reset_list)
+ if (reset_list) {
+ set_current_state(TASK_RUNNING);
xfs_zone_gc_reset_zones(data, reset_list);
+ }
list_for_each_entry_safe(chunk, next, &data->resetting, entry) {
if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
break;
+ set_current_state(TASK_RUNNING);
xfs_zone_gc_finish_reset(chunk);
}
list_for_each_entry_safe(chunk, next, &data->writing, entry) {
if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
break;
+ set_current_state(TASK_RUNNING);
xfs_zone_gc_finish_chunk(chunk);
}
@@ -1027,15 +1025,18 @@ xfs_zone_gc_handle_work(
list_for_each_entry_safe(chunk, next, &data->reading, entry) {
if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
break;
+ set_current_state(TASK_RUNNING);
xfs_zone_gc_write_chunk(chunk);
}
blk_finish_plug(&plug);
- blk_start_plug(&plug);
- while (xfs_zone_gc_start_chunk(data))
- ;
- blk_finish_plug(&plug);
- return true;
+ if (xfs_zone_gc_should_start_new_work(data)) {
+ set_current_state(TASK_RUNNING);
+ blk_start_plug(&plug);
+ while (xfs_zone_gc_start_chunk(data))
+ ;
+ blk_finish_plug(&plug);
+ }
}
/*
@@ -1059,8 +1060,18 @@ xfs_zoned_gcd(
for (;;) {
set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);
xfs_set_zonegc_running(mp);
- if (xfs_zone_gc_handle_work(data))
+
+ xfs_zone_gc_handle_work(data);
+
+ /*
+ * Only sleep if nothing set the state to running. Else check for
+ * work again as someone might have queued up more work and woken
+ * us in the meantime.
+ */
+ if (get_current_state() == TASK_RUNNING) {
+ try_to_freeze();
continue;
+ }
if (list_empty(&data->reading) &&
list_empty(&data->writing) &&
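
The GC daemon change above relies on the classic "arm the sleep state, then look for work" idiom: the task state is set to TASK_INTERRUPTIBLE before scanning, any completed work flips it back to TASK_RUNNING, and the thread only sleeps if nothing did. A condensed sketch with hypothetical have_work()/do_work() helpers (not the real worker loop):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_worker(void *arg)
{
	for (;;) {
		/* Arm the sleep state before checking for work. */
		set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		/* Any work found flips us back to TASK_RUNNING. */
		if (have_work(arg)) {
			__set_current_state(TASK_RUNNING);
			do_work(arg);
		}

		/*
		 * Only sleep if nothing set the state back to running;
		 * otherwise loop and look for more work immediately.
		 */
		if (get_current_state() == TASK_RUNNING) {
			try_to_freeze();
			continue;
		}
		schedule();
	}
	return 0;
}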
diff --git a/fs/xfs/xfs_zone_priv.h b/fs/xfs/xfs_zone_priv.h
index 35e6de3d25ed..4322e26dd99a 100644
--- a/fs/xfs/xfs_zone_priv.h
+++ b/fs/xfs/xfs_zone_priv.h
@@ -44,6 +44,8 @@ struct xfs_open_zone {
* the life time of an open zone.
*/
struct xfs_rtgroup *oz_rtg;
+
+ struct rcu_head oz_rcu;
};
/*
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index cd7ee4df9045..81e603839c4a 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -338,6 +338,7 @@ struct ffa_mem_region_attributes {
* an `struct ffa_mem_region_addr_range`.
*/
u32 composite_off;
+ u8 impdef_val[16];
u64 reserved;
};
@@ -417,15 +418,31 @@ struct ffa_mem_region {
#define CONSTITUENTS_OFFSET(x) \
(offsetof(struct ffa_composite_mem_region, constituents[x]))
+#define FFA_EMAD_HAS_IMPDEF_FIELD(version) ((version) >= FFA_VERSION_1_2)
+#define FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version) ((version) > FFA_VERSION_1_0)
+
+static inline u32 ffa_emad_size_get(u32 ffa_version)
+{
+ u32 sz;
+ struct ffa_mem_region_attributes *ep_mem_access;
+
+ if (FFA_EMAD_HAS_IMPDEF_FIELD(ffa_version))
+ sz = sizeof(*ep_mem_access);
+ else
+ sz = sizeof(*ep_mem_access) - sizeof(ep_mem_access->impdef_val);
+
+ return sz;
+}
+
static inline u32
ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
{
- u32 offset = count * sizeof(struct ffa_mem_region_attributes);
+ u32 offset = count * ffa_emad_size_get(ffa_version);
/*
* Earlier to v1.1, the endpoint memory descriptor array started at
* offset 32(i.e. offset of ep_mem_offset in the current structure)
*/
- if (ffa_version <= FFA_VERSION_1_0)
+ if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(ffa_version))
offset += offsetof(struct ffa_mem_region, ep_mem_offset);
else
offset += sizeof(struct ffa_mem_region);
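
Because the endpoint descriptor stride is now version dependent, callers are expected to step through the array with ffa_emad_size_get() instead of sizeof(). A minimal, hypothetical accessor (ffa_emad_at() is not an existing helper) built only on the functions defined above:

static struct ffa_mem_region_attributes *
ffa_emad_at(struct ffa_mem_region *mem, int idx, u32 ffa_version)
{
	/*
	 * ffa_mem_desc_offset() already accounts for both the header layout
	 * difference (v1.0 vs later) and the per-descriptor size, so the
	 * idx-th descriptor is simply at that byte offset.
	 */
	return (struct ffa_mem_region_attributes *)
		((u8 *)mem + ffa_mem_desc_offset(mem, idx, ffa_version));
}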
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 93318fce31f3..b760a3c470a5 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -452,7 +452,7 @@ struct cgroup_freezer_state {
int nr_frozen_tasks;
/* Freeze time data consistency protection */
- seqcount_t freeze_seq;
+ seqcount_spinlock_t freeze_seq;
/*
* Most recent time the cgroup was requested to freeze.
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index d0cf10d5e0f7..f0cf2714ec52 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -320,9 +320,6 @@ static inline bool exportfs_can_decode_fh(const struct export_operations *nop)
static inline bool exportfs_can_encode_fh(const struct export_operations *nop,
int fh_flags)
{
- if (!nop)
- return false;
-
/*
* If a non-decodeable file handle was requested, we only need to make
* sure that filesystem did not opt-out of encoding fid.
@@ -330,6 +327,10 @@ static inline bool exportfs_can_encode_fh(const struct export_operations *nop,
if (fh_flags & EXPORT_FH_FID)
return exportfs_can_encode_fid(nop);
+ /* Normal file handles cannot be created without export ops */
+ if (!nop)
+ return false;
+
/*
* If a connectable file handle was requested, we need to make sure that
* filesystem can also decode connected file handles.
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
index 622a2939ebe0..87983a5f3681 100644
--- a/include/linux/gpio/regmap.h
+++ b/include/linux/gpio/regmap.h
@@ -38,6 +38,10 @@ struct regmap;
* offset to a register/bitmask pair. If not
* given the default gpio_regmap_simple_xlate()
* is used.
+ * @fixed_direction_output:
+ * (Optional) Bitmap representing the fixed direction of
+ * the GPIO lines. Useful when there are GPIO lines with a
+ * fixed direction mixed together in the same register.
* @drvdata: (Optional) Pointer to driver specific data which is
* not used by gpio-remap but is provided "as is" to the
* driver callback(s).
@@ -85,6 +89,7 @@ struct gpio_regmap_config {
int reg_stride;
int ngpio_per_reg;
struct irq_domain *irq_domain;
+ unsigned long *fixed_direction_output;
#ifdef CONFIG_REGMAP_IRQ
struct regmap_irq_chip *regmap_irq_chip;
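
A hedged example of how a driver might use the new fixed_direction_output bitmap; foo_gpio_probe and the register offsets are assumptions, the other fields follow the existing gpio_regmap_config layout. Lines 0-3 here are wired as output-only, lines 4-7 behave as normal GPIOs.

static DECLARE_BITMAP(foo_fixed_outputs, 8);

static int foo_gpio_probe(struct device *dev, struct regmap *regmap)
{
	struct gpio_regmap_config config = { };

	bitmap_set(foo_fixed_outputs, 0, 4);	/* GPIOs 0-3 are always outputs */

	config.parent = dev;
	config.regmap = regmap;
	config.ngpio = 8;
	config.reg_set_base = 0x00;		/* assumed register offsets */
	config.reg_dat_base = 0x04;
	config.fixed_direction_output = foo_fixed_outputs;

	return PTR_ERR_OR_ZERO(gpio_regmap_register(&config));
}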
diff --git a/include/linux/hung_task.h b/include/linux/hung_task.h
index 34e615c76ca5..c4403eeb7144 100644
--- a/include/linux/hung_task.h
+++ b/include/linux/hung_task.h
@@ -20,6 +20,10 @@
* always zero. So we can use these bits to encode the specific blocking
* type.
*
+ * Note that on architectures where this is not guaranteed, or for any
+ * unaligned lock, this tracking mechanism is silently skipped for that
+ * lock.
+ *
* Type encoding:
* 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX)
* 01 - Blocked on semaphore (BLOCKER_TYPE_SEM)
@@ -45,7 +49,7 @@ static inline void hung_task_set_blocker(void *lock, unsigned long type)
* If the lock pointer matches the BLOCKER_TYPE_MASK, return
* without writing anything.
*/
- if (WARN_ON_ONCE(lock_ptr & BLOCKER_TYPE_MASK))
+ if (lock_ptr & BLOCKER_TYPE_MASK)
return;
WRITE_ONCE(current->blocker, lock_ptr | type);
@@ -53,8 +57,6 @@ static inline void hung_task_set_blocker(void *lock, unsigned long type)
static inline void hung_task_clear_blocker(void)
{
- WARN_ON_ONCE(!READ_ONCE(current->blocker));
-
WRITE_ONCE(current->blocker, 0UL);
}
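
A condensed usage sketch of the blocker encoding described above; example_lock_slowpath() and example_report() are hypothetical (the real callers are the lock slowpaths and the hung task detector), but the bit manipulation follows directly from the type encoding documented in this header.

static void example_lock_slowpath(struct mutex *lock)
{
	/* Record what we are about to block on; silently skipped for
	 * unaligned lock pointers, as noted above. */
	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);

	/* ... sleep until the lock is acquired ... */

	hung_task_clear_blocker();
}

static void example_report(struct task_struct *p)
{
	unsigned long b = READ_ONCE(p->blocker);
	unsigned long type = b & BLOCKER_TYPE_MASK;
	void *lock = (void *)(b & ~BLOCKER_TYPE_MASK);

	/* 'type' selects the report wording, 'lock' is the blocked-on object. */
	(void)type;
	(void)lock;
}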
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index 71cf5bfc6349..0cb36a3ffc47 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -19,7 +19,7 @@ enum misc_res_type {
MISC_CG_RES_SEV_ES,
#endif
#ifdef CONFIG_INTEL_TDX_HOST
- /* Intel TDX HKIDs resource */
+ /** @MISC_CG_RES_TDX: Intel TDX HKIDs resource */
MISC_CG_RES_TDX,
#endif
/** @MISC_CG_RES_TYPES: count of enum misc_res_type constants */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 07614cd95bed..1b0b36aa2a76 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -10833,7 +10833,9 @@ struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
u8 port_access_reg_cap_mask_127_to_96[0x20];
u8 port_access_reg_cap_mask_95_to_64[0x20];
- u8 port_access_reg_cap_mask_63_to_36[0x1c];
+ u8 port_access_reg_cap_mask_63[0x1];
+ u8 pphcr[0x1];
+ u8 port_access_reg_cap_mask_61_to_36[0x1a];
u8 pplm[0x1];
u8 port_access_reg_cap_mask_34_to_32[0x3];
diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h
deleted file mode 100644
index 879f5c78b91a..000000000000
--- a/include/linux/platform_data/usb-davinci.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * USB related definitions
- *
- * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_USB_H
-#define __ASM_ARCH_USB_H
-
-/* Passed as the platform data to the OHCI driver */
-struct da8xx_ohci_root_hub {
- /* Time from power on to power good (in 2 ms units) */
- u8 potpgt;
-};
-
-void davinci_setup_usb(unsigned mA, unsigned potpgt_ms);
-
-#endif /* ifndef __ASM_ARCH_USB_H */
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index a3f44f6c2da1..0b436e15f4cd 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -629,13 +629,13 @@ DEFINE_GUARD(pm_runtime_active_auto, struct device *,
* device.
*/
DEFINE_GUARD_COND(pm_runtime_active, _try,
- pm_runtime_get_active(_T, RPM_TRANSPARENT))
+ pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
DEFINE_GUARD_COND(pm_runtime_active, _try_enabled,
- pm_runtime_resume_and_get(_T))
+ pm_runtime_resume_and_get(_T), _RET == 0)
DEFINE_GUARD_COND(pm_runtime_active_auto, _try,
- pm_runtime_get_active(_T, RPM_TRANSPARENT))
+ pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
DEFINE_GUARD_COND(pm_runtime_active_auto, _try_enabled,
- pm_runtime_resume_and_get(_T))
+ pm_runtime_resume_and_get(_T), _RET == 0)
/**
* pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fb3fec9affaa..a7cc3d1f4fd1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4204,6 +4204,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
+__poll_t datagram_poll_queue(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait,
+ struct sk_buff_head *rcv_queue);
__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index 3068c3084eb6..6ccd1b2af993 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -6,6 +6,7 @@
#ifndef __LINUX_USB_PD_H
#define __LINUX_USB_PD_H
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
@@ -271,9 +272,11 @@ enum pd_pdo_type {
enum pd_apdo_type {
APDO_TYPE_PPS = 0,
+ APDO_TYPE_EPR_AVS = 1,
+ APDO_TYPE_SPR_AVS = 2,
};
-#define PDO_APDO_TYPE_SHIFT 28 /* Only valid value currently is 0x0 - PPS */
+#define PDO_APDO_TYPE_SHIFT 28
#define PDO_APDO_TYPE_MASK 0x3
#define PDO_APDO_TYPE(t) ((t) << PDO_APDO_TYPE_SHIFT)
@@ -297,6 +300,35 @@ enum pd_apdo_type {
PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \
PDO_PPS_APDO_MAX_CURR(max_ma))
+/*
+ * Applicable only to EPR AVS APDO source cap as per
+ * Table 6.15 EPR Adjustable Voltage Supply APDO – Source
+ */
+#define PDO_EPR_AVS_APDO_PEAK_CURRENT GENMASK(27, 26)
+
+/*
+ * Applicable to both EPR AVS APDO source and sink cap as per
+ * Table 6.15 EPR Adjustable Voltage Supply APDO – Source
+ * Table 6.22 EPR Adjustable Voltage Supply APDO – Sink
+ */
+#define PDO_EPR_AVS_APDO_MAX_VOLT GENMASK(25, 17) /* 100mV unit */
+#define PDO_EPR_AVS_APDO_MIN_VOLT GENMASK(15, 8) /* 100mV unit */
+#define PDO_EPR_AVS_APDO_PDP GENMASK(7, 0) /* 1W unit */
+
+/*
+ * Applicable only to SPR AVS APDO source cap as per
+ * Table 6.14 SPR Adjustable Voltage Supply APDO – Source
+ */
+#define PDO_SPR_AVS_APDO_PEAK_CURRENT GENMASK(27, 26)
+
+/*
+ * Applicable to both SPR AVS APDO source and sink cap as per
+ * Table 6.14 SPR Adjustable Voltage Supply APDO – Source
+ * Table 6.21 SPR Adjustable Voltage Supply APDO – Sink
+ */
+#define PDO_SPR_AVS_APDO_9V_TO_15V_MAX_CURR GENMASK(19, 10) /* 10mA unit */
+#define PDO_SPR_AVS_APDO_15V_TO_20V_MAX_CURR GENMASK(9, 0) /* 10mA unit */
+
static inline enum pd_pdo_type pdo_type(u32 pdo)
{
return (pdo >> PDO_TYPE_SHIFT) & PDO_TYPE_MASK;
@@ -350,6 +382,41 @@ static inline unsigned int pdo_pps_apdo_max_current(u32 pdo)
PDO_PPS_APDO_CURR_MASK) * 50;
}
+static inline unsigned int pdo_epr_avs_apdo_src_peak_current(u32 pdo)
+{
+ return FIELD_GET(PDO_EPR_AVS_APDO_PEAK_CURRENT, pdo);
+}
+
+static inline unsigned int pdo_epr_avs_apdo_min_voltage_mv(u32 pdo)
+{
+ return FIELD_GET(PDO_EPR_AVS_APDO_MIN_VOLT, pdo) * 100;
+}
+
+static inline unsigned int pdo_epr_avs_apdo_max_voltage_mv(u32 pdo)
+{
+	return FIELD_GET(PDO_EPR_AVS_APDO_MAX_VOLT, pdo) * 100;
+}
+
+static inline unsigned int pdo_epr_avs_apdo_pdp_w(u32 pdo)
+{
+ return FIELD_GET(PDO_EPR_AVS_APDO_PDP, pdo);
+}
+
+static inline unsigned int pdo_spr_avs_apdo_src_peak_current(u32 pdo)
+{
+ return FIELD_GET(PDO_SPR_AVS_APDO_PEAK_CURRENT, pdo);
+}
+
+static inline unsigned int pdo_spr_avs_apdo_9v_to_15v_max_current_ma(u32 pdo)
+{
+ return FIELD_GET(PDO_SPR_AVS_APDO_9V_TO_15V_MAX_CURR, pdo) * 10;
+}
+
+static inline unsigned int pdo_spr_avs_apdo_15v_to_20v_max_current_ma(u32 pdo)
+{
+ return FIELD_GET(PDO_SPR_AVS_APDO_15V_TO_20V_MAX_CURR, pdo) * 10;
+}
+
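+
An illustrative decode of an EPR AVS source APDO using only the masks and helpers added above plus the existing pdo_type()/pdo_apdo_type() accessors; example_parse_epr_avs() itself is hypothetical.

static void example_parse_epr_avs(u32 pdo)
{
	if (pdo_type(pdo) != PDO_TYPE_APDO ||
	    pdo_apdo_type(pdo) != APDO_TYPE_EPR_AVS)
		return;

	pr_info("EPR AVS: %u..%u mV, PDP %u W, peak current code %u\n",
		pdo_epr_avs_apdo_min_voltage_mv(pdo),
		pdo_epr_avs_apdo_max_voltage_mv(pdo),
		pdo_epr_avs_apdo_pdp_w(pdo),
		pdo_epr_avs_apdo_src_peak_current(pdo));
}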
/* RDO: Request Data Object */
#define RDO_OBJ_POS_SHIFT 28
#define RDO_OBJ_POS_MASK 0x7
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 252af3f77039..309251572e2e 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -337,6 +337,7 @@ struct typec_plug *typec_register_plug(struct typec_cable *cable,
void typec_unregister_plug(struct typec_plug *plug);
void typec_set_data_role(struct typec_port *port, enum typec_data_role role);
+enum typec_data_role typec_get_data_role(struct typec_port *port);
void typec_set_pwr_role(struct typec_port *port, enum typec_role role);
void typec_set_vconn_role(struct typec_port *port, enum typec_role role);
void typec_set_pwr_opmode(struct typec_port *port, enum typec_pwr_opmode mode);
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index b3c0866ea70f..f7db3bd4c90e 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -173,6 +173,19 @@ typec_altmode_get_svdm_version(struct typec_altmode *altmode)
}
/**
+ * typec_altmode_get_data_role - Get port data role
+ * @altmode: Handle to the alternate mode
+ *
+ * Alt Mode drivers should only issue Enter Mode through the port if they are
+ * the DFP.
+ */
+static inline enum typec_data_role
+typec_altmode_get_data_role(struct typec_altmode *altmode)
+{
+ return typec_get_data_role(typec_altmode2port(altmode));
+}
+
+/**
* struct typec_altmode_driver - USB Type-C alternate mode device driver
* @id_table: Null terminated array of SVIDs
* @probe: Callback for device binding
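
A short sketch of how an alt mode driver might honor the guidance in the kernel-doc above before issuing Enter Mode; example_altmode_activate() is hypothetical, typec_altmode_enter() and TYPEC_HOST are existing interfaces.

static int example_altmode_activate(struct typec_altmode *alt)
{
	/* Only the DFP should drive Enter Mode through the port. */
	if (typec_altmode_get_data_role(alt) != TYPEC_HOST)
		return -EOPNOTSUPP;

	return typec_altmode_enter(alt, NULL);
}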
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
index 55dcea12082c..0b570f1b8bc8 100644
--- a/include/linux/usb/typec_tbt.h
+++ b/include/linux/usb/typec_tbt.h
@@ -55,6 +55,7 @@ struct typec_thunderbolt_data {
/* TBT3 Device Enter Mode VDO bits */
#define TBT_ENTER_MODE_CABLE_SPEED(s) TBT_SET_CABLE_SPEED(s)
+#define TBT_ENTER_MODE_UNI_DIR_LSRX BIT(23)
#define TBT_ENTER_MODE_ACTIVE_CABLE BIT(24)
#endif /* __USB_TYPEC_TBT_H */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 20e0584db1dd..4d1780848d0e 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -401,6 +401,10 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
if (!tnl_hdr_negotiated)
return -EINVAL;
+ vhdr->hash_hdr.hash_value = 0;
+ vhdr->hash_hdr.hash_report = 0;
+ vhdr->hash_hdr.padding = 0;
+
/* Let the basic parsing deal with plain GSO features. */
skb_shinfo(skb)->gso_type &= ~tnl_gso_type;
ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen);
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 40ff19f52a8d..517489a7ec60 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -1013,6 +1013,20 @@ struct drm_xe_vm_destroy {
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
+ * - %DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET - Can be used in combination with
+ * %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR to reset madvises when the underlying
+ * CPU address space range is unmapped (typically with munmap(2) or brk(2)).
+ * The madvise values set with &DRM_IOCTL_XE_MADVISE are reset to the values
+ * that were present immediately after the &DRM_IOCTL_XE_VM_BIND.
+ * The reset GPU virtual address range is the intersection of the range bound
+ * using &DRM_IOCTL_XE_VM_BIND and the virtual CPU address space range
+ * unmapped.
+ * This functionality is present to mimic the behaviour of CPU address space
+ * madvises set using madvise(2), which are typically reset on unmap.
+ * Note: free(3) may or may not call munmap(2) and/or brk(2), and may thus
+ * not invoke autoreset. Neither will stack variables going out of scope.
+ * Therefore it's recommended to always explicitly reset the madvises when
+ * freeing the memory backing a region used in a &DRM_IOCTL_XE_MADVISE call.
*
* The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
* - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
@@ -1119,6 +1133,7 @@ struct drm_xe_vm_bind_op {
#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
#define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4)
#define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5)
+#define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET (1 << 6)
/** @flags: Bind flags */
__u32 flags;
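
A minimal userspace fragment enabling madvise autoreset on a CPU-address-mirror bind; only the @flags field documented here is shown, the remaining bind parameters are filled in as usual.

struct drm_xe_vm_bind_op op = {
	.flags = DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR |
		 DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET,
	/* ... remaining bind parameters as usual ... */
};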
diff --git a/include/uapi/linux/usb/cdc.h b/include/uapi/linux/usb/cdc.h
index 1924cf665448..7bd5d12d8b26 100644
--- a/include/uapi/linux/usb/cdc.h
+++ b/include/uapi/linux/usb/cdc.h
@@ -104,8 +104,10 @@ struct usb_cdc_union_desc {
__u8 bDescriptorSubType;
__u8 bMasterInterface0;
- __u8 bSlaveInterface0;
- /* ... and there could be other slave interfaces */
+ union {
+ __u8 bSlaveInterface0;
+ __DECLARE_FLEX_ARRAY(__u8, bSlaveInterfaces);
+ };
} __attribute__ ((packed));
/* "Country Selection Functional Descriptor" from CDC spec 5.2.3.9 */
@@ -115,8 +117,10 @@ struct usb_cdc_country_functional_desc {
__u8 bDescriptorSubType;
__u8 iCountryCodeRelDate;
- __le16 wCountyCode0;
- /* ... and there can be a lot of country codes */
+ union {
+ __le16 wCountryCode0;
+ __DECLARE_FLEX_ARRAY(__le16, wCountryCodes);
+ };
} __attribute__ ((packed));
/* "Network Channel Terminal Functional Descriptor" from CDC spec 5.2.3.11 */
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index ff3364531c77..294c75a8a3bd 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -59,7 +59,6 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
struct io_overflow_cqe *ocqe;
struct io_rings *r = ctx->rings;
- struct rusage sq_usage;
unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
unsigned int sq_head = READ_ONCE(r->sq.head);
unsigned int sq_tail = READ_ONCE(r->sq.tail);
@@ -152,14 +151,15 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
* thread termination.
*/
if (tsk) {
+ u64 usec;
+
get_task_struct(tsk);
rcu_read_unlock();
- getrusage(tsk, RUSAGE_SELF, &sq_usage);
+ usec = io_sq_cpu_usec(tsk);
put_task_struct(tsk);
sq_pid = sq->task_pid;
sq_cpu = sq->sq_cpu;
- sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000
- + sq_usage.ru_stime.tv_usec);
+ sq_total_time = usec;
sq_work_time = sq->work_time;
} else {
rcu_read_unlock();
diff --git a/io_uring/filetable.c b/io_uring/filetable.c
index a21660e3145a..794ef95df293 100644
--- a/io_uring/filetable.c
+++ b/io_uring/filetable.c
@@ -57,7 +57,7 @@ void io_free_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table)
static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
u32 slot_index)
- __must_hold(&req->ctx->uring_lock)
+ __must_hold(&ctx->uring_lock)
{
struct io_rsrc_node *node;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 820ef0527666..296667ba712c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -879,7 +879,7 @@ static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags)
}
static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe,
- struct io_big_cqe *big_cqe)
+ struct io_big_cqe *big_cqe)
{
struct io_overflow_cqe *ocqe;
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index aad655e38672..a727e020fe03 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -155,6 +155,27 @@ static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
return 1;
}
+static bool io_should_commit(struct io_kiocb *req, unsigned int issue_flags)
+{
+ /*
+ * If we came in unlocked, we have no choice but to consume the
+ * buffer here, otherwise nothing ensures that the buffer won't
+ * get used by others. This does mean it'll be pinned until the
+ * IO completes, coming in unlocked means we're being called from
+ * io-wq context and there may be further retries in async hybrid
+ * mode. For the locked case, the caller must call commit when
+	 * the transfer completes (or if we get -EAGAIN and must poll or
+ * retry).
+ */
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ return true;
+
+ /* uring_cmd commits kbuf upfront, no need to auto-commit */
+ if (!io_file_can_poll(req) && req->opcode != IORING_OP_URING_CMD)
+ return true;
+ return false;
+}
+
static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
struct io_buffer_list *bl,
unsigned int issue_flags)
@@ -181,17 +202,7 @@ static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
sel.buf_list = bl;
sel.addr = u64_to_user_ptr(buf->addr);
- if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
- /*
- * If we came in unlocked, we have no choice but to consume the
- * buffer here, otherwise nothing ensures that the buffer won't
- * get used by others. This does mean it'll be pinned until the
- * IO completes, coming in unlocked means we're being called from
- * io-wq context and there may be further retries in async hybrid
- * mode. For the locked case, the caller must call commit when
- * the transfer completes (or if we get -EAGAIN and must poll of
- * retry).
- */
+ if (io_should_commit(req, issue_flags)) {
io_kbuf_commit(req, sel.buf_list, *len, 1);
sel.buf_list = NULL;
}
diff --git a/io_uring/net.c b/io_uring/net.c
index f99b90c762fc..a95cc9ca2a4d 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -383,7 +383,7 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
if (sr->flags & IORING_SEND_VECTORIZED)
- return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
+ return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
}
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index a3f11349ce06..e22f072c7d5f 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -11,6 +11,7 @@
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/cpuset.h>
+#include <linux/sched/cputime.h>
#include <linux/io_uring.h>
#include <uapi/linux/io_uring.h>
@@ -169,7 +170,38 @@ static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
return READ_ONCE(sqd->state);
}
-static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
+struct io_sq_time {
+ bool started;
+ u64 usec;
+};
+
+u64 io_sq_cpu_usec(struct task_struct *tsk)
+{
+ u64 utime, stime;
+
+ task_cputime_adjusted(tsk, &utime, &stime);
+ do_div(stime, 1000);
+ return stime;
+}
+
+static void io_sq_update_worktime(struct io_sq_data *sqd, struct io_sq_time *ist)
+{
+ if (!ist->started)
+ return;
+ ist->started = false;
+ sqd->work_time += io_sq_cpu_usec(current) - ist->usec;
+}
+
+static void io_sq_start_worktime(struct io_sq_time *ist)
+{
+ if (ist->started)
+ return;
+ ist->started = true;
+ ist->usec = io_sq_cpu_usec(current);
+}
+
+static int __io_sq_thread(struct io_ring_ctx *ctx, struct io_sq_data *sqd,
+ bool cap_entries, struct io_sq_time *ist)
{
unsigned int to_submit;
int ret = 0;
@@ -182,6 +214,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
if (to_submit || !wq_list_empty(&ctx->iopoll_list)) {
const struct cred *creds = NULL;
+ io_sq_start_worktime(ist);
+
if (ctx->sq_creds != current_cred())
creds = override_creds(ctx->sq_creds);
@@ -255,23 +289,11 @@ static bool io_sq_tw_pending(struct llist_node *retry_list)
return retry_list || !llist_empty(&tctx->task_list);
}
-static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
-{
- struct rusage end;
-
- getrusage(current, RUSAGE_SELF, &end);
- end.ru_stime.tv_sec -= start->ru_stime.tv_sec;
- end.ru_stime.tv_usec -= start->ru_stime.tv_usec;
-
- sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000;
-}
-
static int io_sq_thread(void *data)
{
struct llist_node *retry_list = NULL;
struct io_sq_data *sqd = data;
struct io_ring_ctx *ctx;
- struct rusage start;
unsigned long timeout = 0;
char buf[TASK_COMM_LEN] = {};
DEFINE_WAIT(wait);
@@ -309,6 +331,7 @@ static int io_sq_thread(void *data)
mutex_lock(&sqd->lock);
while (1) {
bool cap_entries, sqt_spin = false;
+ struct io_sq_time ist = { };
if (io_sqd_events_pending(sqd) || signal_pending(current)) {
if (io_sqd_handle_event(sqd))
@@ -317,9 +340,8 @@ static int io_sq_thread(void *data)
}
cap_entries = !list_is_singular(&sqd->ctx_list);
- getrusage(current, RUSAGE_SELF, &start);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
- int ret = __io_sq_thread(ctx, cap_entries);
+ int ret = __io_sq_thread(ctx, sqd, cap_entries, &ist);
if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
sqt_spin = true;
@@ -327,15 +349,18 @@ static int io_sq_thread(void *data)
if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE))
sqt_spin = true;
- list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
- if (io_napi(ctx))
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
+ if (io_napi(ctx)) {
+ io_sq_start_worktime(&ist);
io_napi_sqpoll_busy_poll(ctx);
+ }
+ }
+
+ io_sq_update_worktime(sqd, &ist);
if (sqt_spin || !time_after(jiffies, timeout)) {
- if (sqt_spin) {
- io_sq_update_worktime(sqd, &start);
+ if (sqt_spin)
timeout = jiffies + sqd->sq_thread_idle;
- }
if (unlikely(need_resched())) {
mutex_unlock(&sqd->lock);
cond_resched();
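
A condensed sketch of the accounting pattern used above, with a hypothetical have_submissions() helper: io_sq_start_worktime() latches the CPU time once, the first time any real work happens in an iteration, and io_sq_update_worktime() adds the delta at the end of the iteration, so pure idle spinning is not billed as work time.

static void example_iteration(struct io_sq_data *sqd)
{
	struct io_sq_time ist = { };

	if (have_submissions())
		io_sq_start_worktime(&ist);	/* starts the clock only once */

	/* ... submit SQEs, run task work, busy poll ... */

	io_sq_update_worktime(sqd, &ist);	/* no-op if nothing started it */
}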
diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h
index b83dcdec9765..fd2f6f29b516 100644
--- a/io_uring/sqpoll.h
+++ b/io_uring/sqpoll.h
@@ -29,6 +29,7 @@ void io_sq_thread_unpark(struct io_sq_data *sqd);
void io_put_sq_data(struct io_sq_data *sqd);
void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
+u64 io_sq_cpu_usec(struct task_struct *tsk);
static inline struct task_struct *sqpoll_task_locked(struct io_sq_data *sqd)
{
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index f25110fb1b12..53532ae6256c 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -250,7 +250,7 @@ int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
iwa = io_uring_alloc_async_data(NULL, req);
- if (!unlikely(iwa))
+ if (unlikely(!iwa))
return -ENOMEM;
iwa->req = req;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 6ae5f48cf64e..fdee387f0d6b 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5892,7 +5892,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
* if the parent has to be frozen, the child has too.
*/
cgrp->freezer.e_freeze = parent->freezer.e_freeze;
- seqcount_init(&cgrp->freezer.freeze_seq);
+ seqcount_spinlock_init(&cgrp->freezer.freeze_seq, &css_set_lock);
if (cgrp->freezer.e_freeze) {
/*
* Set the CGRP_FREEZE flag, so when a process will be
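
With a seqcount_spinlock_t the write side is tied to css_set_lock, so lockdep can verify the writer really holds it while readers use the usual retry loop. A sketch under that assumption; the freeze-time field name is hypothetical, only the seqcount itself comes from the structure above.

static void example_freeze_time_write(struct cgroup *cgrp, u64 now)
{
	lockdep_assert_held(&css_set_lock);

	write_seqcount_begin(&cgrp->freezer.freeze_seq);
	/* cgrp->freezer.freeze_start_time = now;   assumed field name */
	write_seqcount_end(&cgrp->freezer.freeze_seq);
}

static u64 example_freeze_time_read(struct cgroup *cgrp)
{
	unsigned int seq;
	u64 val = 0;

	do {
		seq = read_seqcount_begin(&cgrp->freezer.freeze_seq);
		/* val = cgrp->freezer.freeze_start_time;   assumed field name */
	} while (read_seqcount_retry(&cgrp->freezer.freeze_seq, seq));

	return val;
}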
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 1e5c64cb6a42..138ede653de4 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -23,6 +23,7 @@
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/swiotlb.h>
#include <asm/sections.h>
#include "debug.h"
@@ -594,7 +595,9 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
if (rc == -ENOMEM) {
pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
global_disable = true;
- } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+ } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
+ is_swiotlb_active(entry->dev))) {
err_printk(entry->dev, entry,
"cacheline tracking EEXIST, overlapping mappings aren't supported\n");
}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3ffa0d80ddd1..d1917b28761a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1030,7 +1030,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
{
- scoped_irqdesc_get_and_lock(irq, 0)
+ scoped_irqdesc_get_and_buslock(irq, 0)
__irq_do_set_handler(scoped_irqdesc, handle, is_chained, name);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c94837382037..400856abf672 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -659,7 +659,7 @@ void __disable_irq(struct irq_desc *desc)
static int __disable_irq_nosync(unsigned int irq)
{
- scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
+ scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
__disable_irq(scoped_irqdesc);
return 0;
}
@@ -789,7 +789,7 @@ void __enable_irq(struct irq_desc *desc)
*/
void enable_irq(unsigned int irq)
{
- scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
+ scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) {
struct irq_desc *desc = scoped_irqdesc;
if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq))
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cee1793e8277..25970dbbb279 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6437,6 +6437,16 @@ static void sync_throttle(struct task_group *tg, int cpu)
cfs_rq->throttle_count = pcfs_rq->throttle_count;
cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
+
+ /*
+ * It is not enough to sync the "pelt_clock_throttled" indicator
+ * with the parent cfs_rq when the hierarchy is not queued.
+ * Always join a throttled hierarchy with PELT clock throttled
+	 * and leave it to the first enqueue, or distribution, to
+ * unthrottle the PELT clock.
+ */
+ if (cfs_rq->throttle_count)
+ cfs_rq->pelt_clock_throttled = 1;
}
/* conditionally throttle active cfs_rq's from put_prev_entity() */
@@ -13187,6 +13197,8 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
if (!cfs_rq_pelt_clock_throttled(cfs_rq))
list_add_leaf_cfs_rq(cfs_rq);
}
+
+ assert_list_leaf_cfs_rq(rq_of(cfs_rq));
}
#else /* !CONFIG_FAIR_GROUP_SCHED: */
static void propagate_entity_cfs_rq(struct sched_entity *se) { }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1f5d07067f60..361f9101cef9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3740,11 +3740,9 @@ static inline int mm_cid_get(struct rq *rq, struct task_struct *t,
struct mm_struct *mm)
{
struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;
- struct cpumask *cpumask;
int cid;
lockdep_assert_rq_held(rq);
- cpumask = mm_cidmask(mm);
cid = __this_cpu_read(pcpu_cid->cid);
if (mm_cid_is_valid(cid)) {
mm_cid_snapshot_time(rq, mm);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b6974fce800c..3a4d3b2e3f74 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -3070,7 +3070,7 @@ static int __init tk_aux_sysfs_init(void)
return -ENOMEM;
}
- for (int i = 0; i <= MAX_AUX_CLOCKS; i++) {
+ for (int i = 0; i < MAX_AUX_CLOCKS; i++) {
char id[2] = { [0] = '0' + i, };
struct kobject *clk = kobject_create_and_add(id, auxo);
diff --git a/kernel/trace/rv/monitors/pagefault/Kconfig b/kernel/trace/rv/monitors/pagefault/Kconfig
index 5e16625f1653..0e013f00c33b 100644
--- a/kernel/trace/rv/monitors/pagefault/Kconfig
+++ b/kernel/trace/rv/monitors/pagefault/Kconfig
@@ -5,6 +5,7 @@ config RV_MON_PAGEFAULT
select RV_LTL_MONITOR
depends on RV_MON_RTAPP
depends on X86 || RISCV
+ depends on MMU
default y
select LTL_MON_EVENTS_ID
bool "pagefault monitor"
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index 48338520376f..43e9ea473cda 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -501,7 +501,7 @@ static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos)
list_for_each_entry_continue(mon, &rv_monitors_list, list) {
if (mon->enabled)
- return mon;
+ return &mon->list;
}
return NULL;
@@ -509,7 +509,7 @@ static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos)
static void *enabled_monitors_start(struct seq_file *m, loff_t *pos)
{
- struct rv_monitor *mon;
+ struct list_head *head;
loff_t l;
mutex_lock(&rv_interface_lock);
@@ -517,15 +517,15 @@ static void *enabled_monitors_start(struct seq_file *m, loff_t *pos)
if (list_empty(&rv_monitors_list))
return NULL;
- mon = list_entry(&rv_monitors_list, struct rv_monitor, list);
+ head = &rv_monitors_list;
for (l = 0; l <= *pos; ) {
- mon = enabled_monitors_next(m, mon, &l);
- if (!mon)
+ head = enabled_monitors_next(m, head, &l);
+ if (!head)
break;
}
- return mon;
+ return head;
}
/*
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index eea17e36a22b..8886055e938f 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -97,7 +97,7 @@ config CRYPTO_LIB_POLY1305
config CRYPTO_LIB_POLY1305_ARCH
bool
- depends on CRYPTO_LIB_POLY1305 && !UML
+ depends on CRYPTO_LIB_POLY1305 && !UML && !KMSAN
default y if ARM
default y if ARM64 && KERNEL_MODE_NEON
default y if MIPS
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 93848b4c6944..109b050c795a 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -452,6 +452,9 @@ void damon_destroy_scheme(struct damos *s)
damos_for_each_filter_safe(f, next, s)
damos_destroy_filter(f);
+ damos_for_each_ops_filter_safe(f, next, s)
+ damos_destroy_filter(f);
+
kfree(s->migrate_dests.node_id_arr);
kfree(s->migrate_dests.weight_arr);
damon_del_scheme(s);
@@ -832,7 +835,7 @@ int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src)
src_goal->metric, src_goal->target_value);
if (!new_goal)
return -ENOMEM;
- damos_commit_quota_goal_union(new_goal, src_goal);
+ damos_commit_quota_goal(new_goal, src_goal);
damos_add_quota_goal(dst, new_goal);
}
return 0;
@@ -1450,7 +1453,7 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control)
INIT_LIST_HEAD(&control->list);
mutex_lock(&ctx->call_controls_lock);
- list_add_tail(&ctx->call_controls, &control->list);
+ list_add_tail(&control->list, &ctx->call_controls);
mutex_unlock(&ctx->call_controls_lock);
if (!damon_is_running(ctx))
return -EINVAL;
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 2fc722f998f8..cd6815ecc04e 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1473,13 +1473,14 @@ static int damon_sysfs_commit_input(void *data)
if (IS_ERR(param_ctx))
return PTR_ERR(param_ctx);
test_ctx = damon_new_ctx();
+ if (!test_ctx)
+ return -ENOMEM;
err = damon_commit_ctx(test_ctx, param_ctx);
- if (err) {
- damon_destroy_ctx(test_ctx);
+ if (err)
goto out;
- }
err = damon_commit_ctx(kdamond->damon_ctx, param_ctx);
out:
+ damon_destroy_ctx(test_ctx);
damon_destroy_ctx(param_ctx);
return err;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1b81680b4225..1d1b74950332 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4109,6 +4109,9 @@ static bool thp_underused(struct folio *folio)
if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
return false;
+ if (folio_contain_hwpoisoned_page(folio))
+ return false;
+
for (i = 0; i < folio_nr_pages(folio); i++) {
if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
if (++num_zero_pages > khugepaged_max_ptes_none)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 795ee393eac0..0455119716ec 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7614,13 +7614,12 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
- i_mmap_assert_write_locked(vma->vm_file->f_mapping);
- hugetlb_vma_assert_locked(vma);
if (sz != PMD_SIZE)
return 0;
if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
return 0;
-
+ i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+ hugetlb_vma_assert_locked(vma);
pud_clear(pud);
/*
* Once our caller drops the rmap lock, some other process might be
diff --git a/mm/migrate.c b/mm/migrate.c
index e3065c9edb55..c0e9f15be2a2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -301,8 +301,9 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
struct page *page = folio_page(folio, idx);
pte_t newpte;
- if (PageCompound(page))
+ if (PageCompound(page) || PageHWPoison(page))
return false;
+
VM_BUG_ON_PAGE(!PageAnon(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(pte_present(old_pte), page);
diff --git a/mm/mremap.c b/mm/mremap.c
index 35de0a7b910e..bd7314898ec5 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1237,10 +1237,10 @@ static int copy_vma_and_data(struct vma_remap_struct *vrm,
}
/*
- * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() and
- * account flags on remaining VMA by convention (it cannot be mlock()'d any
- * longer, as pages in range are no longer mapped), and removing anon_vma_chain
- * links from it (if the entire VMA was copied over).
+ * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() flag on
+ * remaining VMA by convention (it cannot be mlock()'d any longer, as pages in
+ * range are no longer mapped), and removing anon_vma_chain links from it if the
+ * entire VMA was copied over.
*/
static void dontunmap_complete(struct vma_remap_struct *vrm,
struct vm_area_struct *new_vma)
@@ -1250,11 +1250,8 @@ static void dontunmap_complete(struct vma_remap_struct *vrm,
unsigned long old_start = vrm->vma->vm_start;
unsigned long old_end = vrm->vma->vm_end;
- /*
- * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old
- * vma.
- */
- vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT);
+ /* We always clear VM_LOCKED[ONFAULT] on the old VMA. */
+ vm_flags_clear(vrm->vma, VM_LOCKED_MASK);
/*
* anon_vma links of the old vma is no longer needed after its page
diff --git a/mm/page_owner.c b/mm/page_owner.c
index c3ca21132c2c..589ec37c94aa 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -168,6 +168,9 @@ static void add_stack_record_to_list(struct stack_record *stack_record,
unsigned long flags;
struct stack *stack;
+ if (!gfpflags_allow_spinning(gfp_mask))
+ return;
+
set_current_in_page_owner();
stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
if (!stack) {
diff --git a/mm/slub.c b/mm/slub.c
index a8fcc7e6f25a..d4367f25b20d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2052,9 +2052,9 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
}
}
-static inline void mark_failed_objexts_alloc(struct slab *slab)
+static inline bool mark_failed_objexts_alloc(struct slab *slab)
{
- slab->obj_exts = OBJEXTS_ALLOC_FAIL;
+ return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0;
}
static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
@@ -2076,7 +2076,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
-static inline void mark_failed_objexts_alloc(struct slab *slab) {}
+static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
struct slabobj_ext *vec, unsigned int objects) {}
@@ -2124,8 +2124,14 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
slab_nid(slab));
}
if (!vec) {
- /* Mark vectors which failed to allocate */
- mark_failed_objexts_alloc(slab);
+ /*
+ * Try to mark vectors which failed to allocate.
+ * If this operation fails, there may be a racing process
+ * that has already completed the allocation.
+ */
+ if (!mark_failed_objexts_alloc(slab) &&
+ slab_obj_exts(slab))
+ return 0;
return -ENOMEM;
}
@@ -2136,6 +2142,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
#ifdef CONFIG_MEMCG
new_exts |= MEMCG_DATA_OBJEXTS;
#endif
+retry:
old_exts = READ_ONCE(slab->obj_exts);
handle_failed_objexts_alloc(old_exts, vec, objects);
if (new_slab) {
@@ -2145,8 +2152,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
* be simply assigned.
*/
slab->obj_exts = new_exts;
- } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
- cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+ } else if (old_exts & ~OBJEXTS_FLAGS_MASK) {
/*
* If the slab is already in use, somebody can allocate and
* assign slabobj_exts in parallel. In this case the existing
@@ -2158,6 +2164,9 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
else
kfree(vec);
return 0;
+ } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
+ /* Retry if a racing thread changed slab->obj_exts from under us. */
+ goto retry;
}
if (allow_spin)
@@ -3419,7 +3428,6 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
/* Unlucky, discard newly allocated slab */
- slab->frozen = 1;
defer_deactivate_slab(slab, NULL);
return NULL;
}
@@ -6468,9 +6476,12 @@ static void free_deferred_objects(struct irq_work *work)
struct slab *slab = container_of(pos, struct slab, llnode);
#ifdef CONFIG_SLUB_TINY
- discard_slab(slab->slab_cache, slab);
+ free_slab(slab->slab_cache, slab);
#else
- deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+ if (slab->frozen)
+ deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
+ else
+ free_slab(slab->slab_cache, slab);
#endif
}
}
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cb4b9ef2e4e3..c285c6465923 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -920,21 +920,22 @@ fault:
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
/**
- * datagram_poll - generic datagram poll
+ * datagram_poll_queue - same as datagram_poll, but on a specific receive
+ * queue
* @file: file struct
* @sock: socket
* @wait: poll table
+ * @rcv_queue: receive queue to poll
*
- * Datagram poll: Again totally generic. This also handles
- * sequenced packet sockets providing the socket receive queue
- * is only ever holding data ready to receive.
+ * Performs polling on the given receive queue, handling shutdown, error,
+ * and connection state. This is useful for protocols that deliver
+ * userspace-bound packets through a custom queue instead of
+ * sk->sk_receive_queue.
*
- * Note: when you *don't* use this routine for this protocol,
- * and you use a different write policy from sock_writeable()
- * then please supply your own write_space callback.
+ * Return: poll bitmask indicating the socket's current state
*/
-__poll_t datagram_poll(struct file *file, struct socket *sock,
- poll_table *wait)
+__poll_t datagram_poll_queue(struct file *file, struct socket *sock,
+ poll_table *wait, struct sk_buff_head *rcv_queue)
{
struct sock *sk = sock->sk;
__poll_t mask;
@@ -956,7 +957,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
mask |= EPOLLHUP;
/* readable? */
- if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ if (!skb_queue_empty_lockless(rcv_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
@@ -978,4 +979,27 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
return mask;
}
+EXPORT_SYMBOL(datagram_poll_queue);
+
+/**
+ * datagram_poll - generic datagram poll
+ * @file: file struct
+ * @sock: socket
+ * @wait: poll table
+ *
+ * Datagram poll: Again totally generic. This also handles
+ * sequenced packet sockets providing the socket receive queue
+ * is only ever holding data ready to receive.
+ *
+ * Note: when you *don't* use this routine for this protocol,
+ * and you use a different write policy from sock_writeable()
+ * then please supply your own write_space callback.
+ *
+ * Return: poll bitmask indicating the socket's current state
+ */
+__poll_t datagram_poll(struct file *file, struct socket *sock, poll_table *wait)
+{
+ return datagram_poll_queue(file, sock, wait,
+ &sock->sk->sk_receive_queue);
+}
EXPORT_SYMBOL(datagram_poll);
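
A hedged example of the intended consumer of the new helper: a protocol (example_sock and example_poll are hypothetical) that delivers userspace-bound packets through its own queue instead of sk_receive_queue simply passes that queue to datagram_poll_queue() from its poll op.

struct example_sock {
	struct sock		sk;		/* must be the first member */
	struct sk_buff_head	user_queue;	/* protocol-private receive queue */
};

static __poll_t example_poll(struct file *file, struct socket *sock,
			     poll_table *wait)
{
	struct example_sock *es = (struct example_sock *)sock->sk;

	return datagram_poll_queue(file, sock, wait, &es->user_queue);
}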
diff --git a/net/core/gro.c b/net/core/gro.c
index 5ba4504cfd28..76f9c3712422 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -639,6 +639,8 @@ EXPORT_SYMBOL(gro_receive_skb);
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
+ struct skb_shared_info *shinfo;
+
if (unlikely(skb->pfmemalloc)) {
consume_skb(skb);
return;
@@ -655,8 +657,12 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->encapsulation = 0;
skb->ip_summed = CHECKSUM_NONE;
- skb_shinfo(skb)->gso_type = 0;
- skb_shinfo(skb)->gso_size = 0;
+
+ shinfo = skb_shinfo(skb);
+ shinfo->gso_type = 0;
+ shinfo->gso_size = 0;
+ shinfo->hwtstamps.hwtstamp = 0;
+
if (unlikely(skb->slow_gro)) {
skb_orphan(skb);
skb_ext_reset(skb);
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index b43911562f4d..fd57b845de33 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -43,12 +43,11 @@ drop:
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);
- if (have_bh_lock)
- local_unlock_nested_bh(&gcells->cells->bh_lock);
-
res = NET_RX_SUCCESS;
unlock:
+ if (have_bh_lock)
+ local_unlock_nested_bh(&gcells->cells->bh_lock);
rcu_read_unlock();
return res;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8040ff7c356e..576d5ec3bb36 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -4715,9 +4715,6 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
u16 vid;
- if (!netlink_capable(skb, CAP_NET_ADMIN))
- return -EPERM;
-
if (!del_bulk) {
err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
NULL, extack);
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index b120470246cc..c96b63adf96f 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -34,12 +34,18 @@ static int hsr_newlink(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct net *link_net = rtnl_newlink_link_net(params);
+ struct net_device *link[2], *interlink = NULL;
struct nlattr **data = params->data;
enum hsr_version proto_version;
unsigned char multicast_spec;
u8 proto = HSR_PROTOCOL_HSR;
- struct net_device *link[2], *interlink = NULL;
+ if (!net_eq(link_net, dev_net(dev))) {
+ NL_SET_ERR_MSG_MOD(extack,
+				   "HSR slaves/interlink must be in the same net namespace as the HSR link");
+ return -EINVAL;
+ }
+
if (!data) {
NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
return -EINVAL;
diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c
index e0f44dc232aa..2ae95476dba3 100644
--- a/net/mptcp/pm_kernel.c
+++ b/net/mptcp/pm_kernel.c
@@ -370,6 +370,10 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
}
subflow:
+ /* No need to try establishing subflows to remote id0 if not allowed */
+ if (mptcp_pm_add_addr_c_flag_case(msk))
+ goto exit;
+
/* check if should create a new subflow */
while (msk->pm.local_addr_used < endp_subflow_max &&
msk->pm.extra_subflows < limit_extra_subflows) {
@@ -401,6 +405,8 @@ subflow:
__mptcp_subflow_connect(sk, &local, &addrs[i]);
spin_lock_bh(&msk->pm.lock);
}
+
+exit:
mptcp_pm_nl_check_work_pending(msk);
}
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 5c1652181805..f5a7d5a38755 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -169,13 +169,14 @@ next_chunk:
chunk->head_skb = chunk->skb;
/* skbs with "cover letter" */
- if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len)
+ if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len) {
+ if (WARN_ON(!skb_shinfo(chunk->skb)->frag_list)) {
+ __SCTP_INC_STATS(dev_net(chunk->skb->dev),
+ SCTP_MIB_IN_PKT_DISCARDS);
+ sctp_chunk_free(chunk);
+ goto next_chunk;
+ }
chunk->skb = skb_shinfo(chunk->skb)->frag_list;
-
- if (WARN_ON(!chunk->skb)) {
- __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS);
- sctp_chunk_free(chunk);
- goto next_chunk;
}
}
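
The sctp/inqueue.c hunk above checks frag_list before overwriting chunk->skb; with the old ordering a missing frag_list left chunk->skb NULL, and the discard path then dereferenced it via dev_net(chunk->skb->dev). A minimal sketch of the check-before-overwrite pattern (example_unwrap() is a hypothetical name):

    static int example_unwrap(struct sctp_chunk *chunk)
    {
            struct sk_buff *frags = skb_shinfo(chunk->skb)->frag_list;

            /* Validate the replacement while chunk->skb is still intact, so
             * error accounting that reads chunk->skb->dev stays safe.
             */
            if (WARN_ON(!frags))
                    return -EINVAL;

            chunk->skb = frags;
            return 0;
    }
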
diff --git a/net/smc/smc_inet.c b/net/smc/smc_inet.c
index a944e7dcb8b9..a94084b4a498 100644
--- a/net/smc/smc_inet.c
+++ b/net/smc/smc_inet.c
@@ -56,7 +56,6 @@ static struct inet_protosw smc_inet_protosw = {
.protocol = IPPROTO_SMC,
.prot = &smc_inet_prot,
.ops = &smc_inet_stream_ops,
- .flags = INET_PROTOSW_ICSK,
};
#if IS_ENABLED(CONFIG_IPV6)
@@ -104,27 +103,15 @@ static struct inet_protosw smc_inet6_protosw = {
.protocol = IPPROTO_SMC,
.prot = &smc_inet6_prot,
.ops = &smc_inet6_stream_ops,
- .flags = INET_PROTOSW_ICSK,
};
#endif /* CONFIG_IPV6 */
-static unsigned int smc_sync_mss(struct sock *sk, u32 pmtu)
-{
- /* No need pass it through to clcsock, mss can always be set by
- * sock_create_kern or smc_setsockopt.
- */
- return 0;
-}
-
static int smc_inet_init_sock(struct sock *sk)
{
struct net *net = sock_net(sk);
/* init common smc sock */
smc_sk_init(net, sk, IPPROTO_SMC);
-
- inet_csk(sk)->icsk_sync_mss = smc_sync_mss;
-
/* create clcsock */
return smc_create_clcsk(net, sk, sk->sk_family);
}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 4c2db6cca557..76763247a377 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -487,12 +487,26 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
goto err;
}
- if (vsk->transport) {
- if (vsk->transport == new_transport) {
- ret = 0;
- goto err;
- }
+ if (vsk->transport && vsk->transport == new_transport) {
+ ret = 0;
+ goto err;
+ }
+ /* We increase the module refcnt to prevent the transport unloading
+ * while there are open sockets assigned to it.
+ */
+ if (!new_transport || !try_module_get(new_transport->module)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* It's safe to release the mutex after a successful try_module_get().
+ * Whichever transport `new_transport` points at, it won't go away until
+ * the last module_put() below or in vsock_deassign_transport().
+ */
+ mutex_unlock(&vsock_register_mutex);
+
+ if (vsk->transport) {
/* transport->release() must be called with sock lock acquired.
* This path can only be taken during vsock_connect(), where we
* have already held the sock lock. In the other cases, this
@@ -512,20 +526,6 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
vsk->peer_shutdown = 0;
}
- /* We increase the module refcnt to prevent the transport unloading
- * while there are open sockets assigned to it.
- */
- if (!new_transport || !try_module_get(new_transport->module)) {
- ret = -ENODEV;
- goto err;
- }
-
- /* It's safe to release the mutex after a successful try_module_get().
- * Whichever transport `new_transport` points at, it won't go away until
- * the last module_put() below or in vsock_deassign_transport().
- */
- mutex_unlock(&vsock_register_mutex);
-
if (sk->sk_type == SOCK_SEQPACKET) {
if (!new_transport->seqpacket_allow ||
!new_transport->seqpacket_allow(remote_cid)) {
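
The af_vsock.c reordering above takes the module reference on the new transport, and drops vsock_register_mutex, before releasing any previously assigned transport; the comments in the hunk explain why the reference makes the early unlock safe. A minimal sketch of that ordering under stated assumptions; registry_lock, lookup_backend(), release_old_backend() and struct example_backend are hypothetical:

    static int example_assign_backend(struct example_sock *es, int id)
    {
            struct example_backend *be;

            mutex_lock(&registry_lock);
            be = lookup_backend(id);
            /* Pin the backend module while still under the registry lock. */
            if (!be || !try_module_get(be->module)) {
                    mutex_unlock(&registry_lock);
                    return -ENODEV;
            }
            /* 'be' cannot be unloaded until the matching module_put(). */
            mutex_unlock(&registry_lock);

            if (es->backend)
                    release_old_backend(es);  /* heavier work, lock already dropped */

            es->backend = be;
            return 0;
    }
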
diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c
index fc7a603b04f1..bf744ac9d5a7 100644
--- a/net/xfrm/espintcp.c
+++ b/net/xfrm/espintcp.c
@@ -555,14 +555,10 @@ static void espintcp_close(struct sock *sk, long timeout)
static __poll_t espintcp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- __poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
struct espintcp_ctx *ctx = espintcp_getctx(sk);
- if (!skb_queue_empty(&ctx->ike_queue))
- mask |= EPOLLIN | EPOLLRDNORM;
-
- return mask;
+ return datagram_poll_queue(file, sock, wait, &ctx->ike_queue);
}
static void build_protos(struct proto *espintcp_prot,
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 2e43c66635a2..d6906465e17e 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -80,6 +80,7 @@
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/tracepoint.h>
+#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index 551da6c9b506..16049d6e713f 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -56,6 +56,7 @@
#include "task.c"
#include "time.c"
#include "uaccess.c"
+#include "usb.c"
#include "vmalloc.c"
#include "wait.c"
#include "workqueue.c"
diff --git a/rust/kernel/auxiliary.rs b/rust/kernel/auxiliary.rs
index e11848bbf206..7a3b0b9c418e 100644
--- a/rust/kernel/auxiliary.rs
+++ b/rust/kernel/auxiliary.rs
@@ -217,13 +217,7 @@ impl<Ctx: device::DeviceContext> Device<Ctx> {
/// Returns a reference to the parent [`device::Device`], if any.
pub fn parent(&self) -> Option<&device::Device> {
- let ptr: *const Self = self;
- // CAST: `Device<Ctx: DeviceContext>` types are transparent to each other.
- let ptr: *const Device = ptr.cast();
- // SAFETY: `ptr` was derived from `&self`.
- let this = unsafe { &*ptr };
-
- this.as_ref().parent()
+ self.as_ref().parent()
}
}
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
index 1321e6f0b53c..a849b7dde2fd 100644
--- a/rust/kernel/device.rs
+++ b/rust/kernel/device.rs
@@ -251,7 +251,7 @@ impl<Ctx: DeviceContext> Device<Ctx> {
/// Returns a reference to the parent device, if any.
#[cfg_attr(not(CONFIG_AUXILIARY_BUS), expect(dead_code))]
- pub(crate) fn parent(&self) -> Option<&Self> {
+ pub(crate) fn parent(&self) -> Option<&Device> {
// SAFETY:
// - By the type invariant `self.as_raw()` is always valid.
// - The parent device is only ever set at device creation.
@@ -264,7 +264,7 @@ impl<Ctx: DeviceContext> Device<Ctx> {
// - Since `parent` is not NULL, it must be a valid pointer to a `struct device`.
// - `parent` is valid for the lifetime of `self`, since a `struct device` holds a
// reference count of its parent.
- Some(unsafe { Self::from_raw(parent) })
+ Some(unsafe { Device::from_raw(parent) })
}
}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 3dd7bebe7888..cd191686fef6 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -139,6 +139,8 @@ pub mod tracepoint;
pub mod transmute;
pub mod types;
pub mod uaccess;
+#[cfg(CONFIG_USB = "y")]
+pub mod usb;
pub mod workqueue;
pub mod xarray;
diff --git a/samples/rust/Kconfig b/samples/rust/Kconfig
index c376eb899b7a..c1cc787a9add 100644
--- a/samples/rust/Kconfig
+++ b/samples/rust/Kconfig
@@ -107,7 +107,7 @@ config SAMPLE_RUST_DRIVER_PLATFORM
config SAMPLE_RUST_DRIVER_USB
tristate "USB Driver"
- depends on USB = y && BROKEN
+ depends on USB = y
help
This option builds the Rust USB driver sample.
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index a5770570b106..620854fdaaf6 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -217,6 +217,7 @@ static bool is_rust_noreturn(const struct symbol *func)
* these come from the Rust standard library).
*/
return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
+ str_ends_with(func->name, "_4core6option13expect_failed") ||
str_ends_with(func->name, "_4core6option13unwrap_failed") ||
str_ends_with(func->name, "_4core6result13unwrap_failed") ||
str_ends_with(func->name, "_4core9panicking5panic") ||
@@ -4710,8 +4711,8 @@ static int check_abs_references(struct objtool_file *file)
for_each_reloc(sec->rsec, reloc) {
if (arch_absolute_reloc(file->elf, reloc)) {
- WARN("section %s has absolute relocation at offset 0x%lx",
- sec->name, reloc_offset(reloc));
+ WARN("section %s has absolute relocation at offset 0x%llx",
+ sec->name, (unsigned long long)reloc_offset(reloc));
ret++;
}
}
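
The objtool hunk above prints the relocation offset with %llx and an explicit cast, presumably because the offset's underlying type does not match %lx on every host configuration; casting to unsigned long long keeps a single format string correct everywhere. A small standalone illustration of the idiom (plain userspace C, values made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Stand-in for a value whose underlying type may be 32 or 64 bits
             * depending on the build.
             */
            uint64_t offset = 0x1234abcdULL;

            /* Cast to the widest standard unsigned type so one format string
             * works regardless of the variable's actual width.
             */
            printf("absolute relocation at offset 0x%llx\n",
                   (unsigned long long)offset);
            return 0;
    }
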
diff --git a/tools/testing/selftests/cgroup/lib/include/cgroup_util.h b/tools/testing/selftests/cgroup/lib/include/cgroup_util.h
index 9dc90a1b386d..7ab2824ed7b5 100644
--- a/tools/testing/selftests/cgroup/lib/include/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/lib/include/cgroup_util.h
@@ -25,6 +25,26 @@ static inline int values_close(long a, long b, int err)
return labs(a - b) <= (a + b) / 100 * err;
}
+/*
+ * Check whether two given values differ by less than err% of their sum and
+ * print detailed debug info to stderr when they do not.
+ */
+static inline int values_close_report(long a, long b, int err)
+{
+ long diff = labs(a - b);
+ long limit = (a + b) / 100 * err;
+ double actual_err = (a + b) ? (100.0 * diff / (a + b)) : 0.0;
+ int close = diff <= limit;
+
+ if (!close)
+ fprintf(stderr,
+ "[FAIL] actual=%ld expected=%ld | diff=%ld | limit=%ld | "
+ "tolerance=%d%% | actual_error=%.2f%%\n",
+ a, b, diff, limit, err, actual_err);
+
+ return close;
+}
+
extern ssize_t read_text(const char *path, char *buf, size_t max_len);
extern ssize_t write_text(const char *path, char *buf, ssize_t len);
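
values_close_report() added above behaves like values_close() but prints the measured values, the limit and the actual error when the comparison fails, which is what the test_cpu.c call sites below rely on. A minimal usage sketch in the selftest style; the 10% tolerance and the wrapper function are illustrative only:

    static int example_check(long usage_usec, long expected_usage_usec)
    {
            /* Accept up to 10% deviation; on failure the helper has already
             * printed diff/limit/actual_error to stderr.
             */
            if (!values_close_report(usage_usec, expected_usage_usec, 10))
                    return KSFT_FAIL;
            return KSFT_PASS;
    }
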
diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
index 2a60e6c41940..d54e2317efff 100644
--- a/tools/testing/selftests/cgroup/test_cpu.c
+++ b/tools/testing/selftests/cgroup/test_cpu.c
@@ -219,7 +219,7 @@ static int test_cpucg_stats(const char *root)
if (user_usec <= 0)
goto cleanup;
- if (!values_close(usage_usec, expected_usage_usec, 1))
+ if (!values_close_report(usage_usec, expected_usage_usec, 1))
goto cleanup;
ret = KSFT_PASS;
@@ -291,7 +291,7 @@ static int test_cpucg_nice(const char *root)
user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
nice_usec = cg_read_key_long(cpucg, "cpu.stat", "nice_usec");
- if (!values_close(nice_usec, expected_nice_usec, 1))
+ if (!values_close_report(nice_usec, expected_nice_usec, 1))
goto cleanup;
ret = KSFT_PASS;
@@ -404,7 +404,7 @@ overprovision_validate(const struct cpu_hogger *children, int num_children)
goto cleanup;
delta = children[i + 1].usage - children[i].usage;
- if (!values_close(delta, children[0].usage, 35))
+ if (!values_close_report(delta, children[0].usage, 35))
goto cleanup;
}
@@ -444,7 +444,7 @@ underprovision_validate(const struct cpu_hogger *children, int num_children)
int ret = KSFT_FAIL, i;
for (i = 0; i < num_children - 1; i++) {
- if (!values_close(children[i + 1].usage, children[0].usage, 15))
+ if (!values_close_report(children[i + 1].usage, children[0].usage, 15))
goto cleanup;
}
@@ -573,16 +573,16 @@ run_cpucg_nested_weight_test(const char *root, bool overprovisioned)
nested_leaf_usage = leaf[1].usage + leaf[2].usage;
if (overprovisioned) {
- if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
+ if (!values_close_report(leaf[0].usage, nested_leaf_usage, 15))
goto cleanup;
- } else if (!values_close(leaf[0].usage * 2, nested_leaf_usage, 15))
+ } else if (!values_close_report(leaf[0].usage * 2, nested_leaf_usage, 15))
goto cleanup;
child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
if (child_usage <= 0)
goto cleanup;
- if (!values_close(child_usage, nested_leaf_usage, 1))
+ if (!values_close_report(child_usage, nested_leaf_usage, 1))
goto cleanup;
ret = KSFT_PASS;
@@ -691,7 +691,7 @@ static int test_cpucg_max(const char *root)
expected_usage_usec
= n_periods * quota_usec + MIN(remainder_usec, quota_usec);
- if (!values_close(usage_usec, expected_usage_usec, 10))
+ if (!values_close_report(usage_usec, expected_usage_usec, 10))
goto cleanup;
ret = KSFT_PASS;
@@ -762,7 +762,7 @@ static int test_cpucg_max_nested(const char *root)
expected_usage_usec
= n_periods * quota_usec + MIN(remainder_usec, quota_usec);
- if (!values_close(usage_usec, expected_usage_usec, 10))
+ if (!values_close_report(usage_usec, expected_usage_usec, 10))
goto cleanup;
ret = KSFT_PASS;
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index c90d8e8b95cb..78a1aa4ecff2 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -2324,7 +2324,7 @@ laminar_endp_tests()
{
# no laminar endpoints: routing rules are used
if reset_with_tcp_filter "without a laminar endpoint" ns1 10.0.2.2 REJECT &&
- mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -2336,7 +2336,7 @@ laminar_endp_tests()
# laminar endpoints: this endpoint is used
if reset_with_tcp_filter "with a laminar endpoint" ns1 10.0.2.2 REJECT &&
- mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -2348,7 +2348,7 @@ laminar_endp_tests()
# laminar endpoints: these endpoints are used
if reset_with_tcp_filter "with multiple laminar endpoints" ns1 10.0.2.2 REJECT &&
- mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -2363,7 +2363,7 @@ laminar_endp_tests()
# laminar endpoints: only one endpoint is used
if reset_with_tcp_filter "single laminar endpoint" ns1 10.0.2.2 REJECT &&
- mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -2376,7 +2376,7 @@ laminar_endp_tests()
# laminar endpoints: subflow and laminar flags
if reset_with_tcp_filter "sublow + laminar endpoints" ns1 10.0.2.2 REJECT &&
- mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
+ continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then
pm_nl_set_limits $ns1 0 4
pm_nl_set_limits $ns2 2 4
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -3939,7 +3939,7 @@ endpoint_tests()
# subflow_rebuild_header is needed to support the implicit flag
# userspace pm type prevents add_addr
if reset "implicit EP" &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
pm_nl_set_limits $ns1 2 2
pm_nl_set_limits $ns2 2 2
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
@@ -3964,7 +3964,7 @@ endpoint_tests()
fi
if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
start_events
pm_nl_set_limits $ns1 0 3
pm_nl_set_limits $ns2 0 3
@@ -4040,7 +4040,7 @@ endpoint_tests()
# remove and re-add
if reset_with_events "delete re-add signal" &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=0
pm_nl_set_limits $ns1 0 3
pm_nl_set_limits $ns2 3 3
@@ -4115,7 +4115,7 @@ endpoint_tests()
# flush and re-add
if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT &&
- mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
pm_nl_set_limits $ns1 0 2
pm_nl_set_limits $ns2 1 2
# broadcast IP: no packet for this address will be received on ns1
diff --git a/tools/testing/selftests/net/sctp_hello.c b/tools/testing/selftests/net/sctp_hello.c
index f02f1f95d227..a04dac0b8027 100644
--- a/tools/testing/selftests/net/sctp_hello.c
+++ b/tools/testing/selftests/net/sctp_hello.c
@@ -29,7 +29,6 @@ static void set_addr(struct sockaddr_storage *ss, char *ip, char *port, int *len
static int do_client(int argc, char *argv[])
{
struct sockaddr_storage ss;
- char buf[] = "hello";
int csk, ret, len;
if (argc < 5) {
@@ -56,16 +55,10 @@ static int do_client(int argc, char *argv[])
set_addr(&ss, argv[3], argv[4], &len);
ret = connect(csk, (struct sockaddr *)&ss, len);
- if (ret < 0) {
- printf("failed to connect to peer\n");
+ if (ret < 0)
return -1;
- }
- ret = send(csk, buf, strlen(buf) + 1, 0);
- if (ret < 0) {
- printf("failed to send msg %d\n", ret);
- return -1;
- }
+ recv(csk, NULL, 0, 0);
close(csk);
return 0;
@@ -75,7 +68,6 @@ int main(int argc, char *argv[])
{
struct sockaddr_storage ss;
int lsk, csk, ret, len;
- char buf[20];
if (argc < 2 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) {
printf("%s server|client ...\n", argv[0]);
@@ -125,11 +117,6 @@ int main(int argc, char *argv[])
return -1;
}
- ret = recv(csk, buf, sizeof(buf), 0);
- if (ret <= 0) {
- printf("failed to recv msg %d\n", ret);
- return -1;
- }
close(csk);
close(lsk);
diff --git a/tools/testing/selftests/net/sctp_vrf.sh b/tools/testing/selftests/net/sctp_vrf.sh
index c854034b6aa1..667b211aa8a1 100755
--- a/tools/testing/selftests/net/sctp_vrf.sh
+++ b/tools/testing/selftests/net/sctp_vrf.sh
@@ -20,9 +20,9 @@ setup() {
modprobe sctp_diag
setup_ns CLIENT_NS1 CLIENT_NS2 SERVER_NS
- ip net exec $CLIENT_NS1 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null
- ip net exec $CLIENT_NS2 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null
- ip net exec $SERVER_NS sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null
+ ip net exec $CLIENT_NS1 sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip net exec $CLIENT_NS2 sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip net exec $SERVER_NS sysctl -wq net.ipv6.conf.default.accept_dad=0
ip -n $SERVER_NS link add veth1 type veth peer name veth1 netns $CLIENT_NS1
ip -n $SERVER_NS link add veth2 type veth peer name veth1 netns $CLIENT_NS2
@@ -62,17 +62,40 @@ setup() {
}
cleanup() {
- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null
+ wait_client $CLIENT_NS1
+ wait_client $CLIENT_NS2
+ stop_server
cleanup_ns $CLIENT_NS1 $CLIENT_NS2 $SERVER_NS
}
-wait_server() {
+start_server() {
local IFACE=$1
local CNT=0
- until ip netns exec $SERVER_NS ss -lS src $SERVER_IP:$SERVER_PORT | \
- grep LISTEN | grep "$IFACE" 2>&1 >/dev/null; do
- [ $((CNT++)) = "20" ] && { RET=3; return $RET; }
+ ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP $SERVER_PORT $IFACE &
+ disown
+ until ip netns exec $SERVER_NS ss -SlH | grep -q "$IFACE"; do
+ [ $((CNT++)) -eq 30 ] && { RET=3; return $RET; }
+ sleep 0.1
+ done
+}
+
+stop_server() {
+ local CNT=0
+
+ ip netns exec $SERVER_NS pkill sctp_hello
+ while ip netns exec $SERVER_NS ss -SaH | grep -q .; do
+ [ $((CNT++)) -eq 30 ] && break
+ sleep 0.1
+ done
+}
+
+wait_client() {
+ local CLIENT_NS=$1
+ local CNT=0
+
+ while ip netns exec $CLIENT_NS ss -SaH | grep -q .; do
+ [ $((CNT++)) -eq 30 ] && break
sleep 0.1
done
}
@@ -81,14 +104,12 @@ do_test() {
local CLIENT_NS=$1
local IFACE=$2
- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null
- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
- $SERVER_PORT $IFACE 2>&1 >/dev/null &
- disown
- wait_server $IFACE || return $RET
+ start_server $IFACE || return $RET
timeout 3 ip netns exec $CLIENT_NS ./sctp_hello client $AF \
- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT
RET=$?
+ wait_client $CLIENT_NS
+ stop_server
return $RET
}
@@ -96,25 +117,21 @@ do_testx() {
local IFACE1=$1
local IFACE2=$2
- ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null
- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
- $SERVER_PORT $IFACE1 2>&1 >/dev/null &
- disown
- wait_server $IFACE1 || return $RET
- ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \
- $SERVER_PORT $IFACE2 2>&1 >/dev/null &
- disown
- wait_server $IFACE2 || return $RET
+ start_server $IFACE1 || return $RET
+ start_server $IFACE2 || return $RET
timeout 3 ip netns exec $CLIENT_NS1 ./sctp_hello client $AF \
- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null && \
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT && \
timeout 3 ip netns exec $CLIENT_NS2 ./sctp_hello client $AF \
- $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT
RET=$?
+ wait_client $CLIENT_NS1
+ wait_client $CLIENT_NS2
+ stop_server
return $RET
}
testup() {
- ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=1 2>&1 >/dev/null
+ ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=1
echo -n "TEST 01: nobind, connect from client 1, l3mdev_accept=1, Y "
do_test $CLIENT_NS1 || { echo "[FAIL]"; return $RET; }
echo "[PASS]"
@@ -123,7 +140,7 @@ testup() {
do_test $CLIENT_NS2 && { echo "[FAIL]"; return $RET; }
echo "[PASS]"
- ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=0 2>&1 >/dev/null
+ ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=0
echo -n "TEST 03: nobind, connect from client 1, l3mdev_accept=0, N "
do_test $CLIENT_NS1 && { echo "[FAIL]"; return $RET; }
echo "[PASS]"
@@ -160,7 +177,7 @@ testup() {
do_testx vrf-1 vrf-2 || { echo "[FAIL]"; return $RET; }
echo "[PASS]"
- echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, N "
+ echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, Y "
do_testx vrf-2 vrf-1 || { echo "[FAIL]"; return $RET; }
echo "[PASS]"
}