-rw-r--r--.mailmap2
-rw-r--r--CREDITS4
-rw-r--r--Documentation/ABI/removed/sysfs-class-cxl (renamed from Documentation/ABI/obsolete/sysfs-class-cxl)55
-rw-r--r--Documentation/ABI/stable/sysfs-devices-node6
-rw-r--r--Documentation/ABI/testing/sysfs-bus-cxl53
-rw-r--r--Documentation/admin-guide/device-mapper/dm-crypt.rst5
-rw-r--r--Documentation/admin-guide/device-mapper/dm-integrity.rst5
-rw-r--r--Documentation/admin-guide/device-mapper/verity.rst20
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt2
-rw-r--r--Documentation/admin-guide/sysctl/fs.rst25
-rw-r--r--Documentation/arch/powerpc/cxl.rst470
-rw-r--r--Documentation/arch/powerpc/index.rst1
-rw-r--r--Documentation/block/ublk.rst37
-rw-r--r--Documentation/devicetree/bindings/mfd/aspeed-lpc.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/adi,max31335.yaml4
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml3
-rw-r--r--Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml5
-rw-r--r--Documentation/devicetree/bindings/serial/8250.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-lpuart.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/nvidia,tegra264-utc.yaml73
-rw-r--r--Documentation/devicetree/bindings/serial/pl011.yaml3
-rw-r--r--Documentation/devicetree/bindings/serial/samsung_uart.yaml4
-rw-r--r--Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml21
-rw-r--r--Documentation/devicetree/bindings/serial/sprd-uart.yaml9
-rw-r--r--Documentation/devicetree/bindings/usb/generic-xhci.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/microchip,usb2514.yaml35
-rw-r--r--Documentation/devicetree/bindings/usb/parade,ps8830.yaml140
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/richtek,rt1711h.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml19
-rw-r--r--Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml44
-rw-r--r--Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml11
-rw-r--r--Documentation/devicetree/bindings/usb/usb-device.yaml6
-rw-r--r--Documentation/driver-api/cxl/maturity-map.rst2
-rw-r--r--Documentation/driver-api/serial/driver.rst4
-rw-r--r--Documentation/driver-api/tty/tty_driver.rst4
-rw-r--r--Documentation/driver-api/tty/tty_struct.rst2
-rw-r--r--Documentation/driver-api/usb/writing_musb_glue_layer.rst2
-rw-r--r--Documentation/features/core/mseal_sys_mappings/arch-support.txt30
-rw-r--r--Documentation/filesystems/9p.rst6
-rw-r--r--Documentation/rust/arch-support.rst1
-rw-r--r--Documentation/trace/debugging.rst2
-rw-r--r--Documentation/usb/CREDITS2
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst4
-rw-r--r--Documentation/userspace-api/mseal.rst21
-rw-r--r--MAINTAINERS82
-rw-r--r--arch/alpha/kernel/srmcons.c62
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/include/asm/vmlinux.lds.h14
-rw-r--r--arch/arm/kernel/smp.c3
-rw-r--r--arch/arm/kernel/vmlinux-xip.lds.S2
-rw-r--r--arch/arm/kernel/vmlinux.lds.S2
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/include/asm/cputype.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h5
-rw-r--r--arch/arm64/include/asm/traps.h4
-rw-r--r--arch/arm64/kernel/compat_alignment.c2
-rw-r--r--arch/arm64/kernel/proton-pack.c1
-rw-r--r--arch/arm64/kernel/vdso.c9
-rw-r--r--arch/arm64/mm/mmu.c3
-rw-r--r--arch/csky/include/asm/pgalloc.h7
-rw-r--r--arch/hexagon/include/asm/pgalloc.h7
-rw-r--r--arch/loongarch/Kconfig7
-rw-r--r--arch/loongarch/configs/loongson3_defconfig11
-rw-r--r--arch/loongarch/include/asm/cache.h2
-rw-r--r--arch/loongarch/include/asm/irq.h2
-rw-r--r--arch/loongarch/include/asm/pgalloc.h7
-rw-r--r--arch/loongarch/include/asm/stacktrace.h3
-rw-r--r--arch/loongarch/include/asm/unwind_hints.h10
-rw-r--r--arch/loongarch/kernel/env.c2
-rw-r--r--arch/loongarch/kernel/kgdb.c5
-rw-r--r--arch/loongarch/net/bpf_jit.c12
-rw-r--r--arch/loongarch/net/bpf_jit.h5
-rw-r--r--arch/loongarch/vdso/Makefile3
-rw-r--r--arch/loongarch/vdso/vgetrandom-chacha.S13
-rw-r--r--arch/m68k/include/asm/sun3_pgalloc.h7
-rw-r--r--arch/microblaze/mm/init.c2
-rw-r--r--arch/mips/include/asm/pgalloc.h7
-rw-r--r--arch/nios2/include/asm/pgalloc.h7
-rw-r--r--arch/openrisc/include/asm/pgalloc.h7
-rw-r--r--arch/powerpc/configs/skiroot_defconfig1
-rw-r--r--arch/powerpc/include/asm/copro.h6
-rw-r--r--arch/powerpc/include/asm/device.h3
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h17
-rw-r--r--arch/powerpc/mm/book3s64/hash_native.c13
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c10
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c1
-rw-r--r--arch/powerpc/mm/book3s64/slice.c6
-rw-r--r--arch/powerpc/mm/copro_fault.c11
-rw-r--r--arch/powerpc/platforms/cell/spufs/gang.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c63
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h2
-rw-r--r--arch/powerpc/platforms/powernv/Makefile1
-rw-r--r--arch/powerpc/platforms/powernv/pci-cxl.c153
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c43
-rw-r--r--arch/powerpc/platforms/powernv/pci.c61
-rw-r--r--arch/powerpc/platforms/powernv/pci.h2
-rw-r--r--arch/riscv/include/asm/pgalloc.h26
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/hypfs/hypfs_diag_fs.c2
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/sh/include/asm/pgalloc.h7
-rw-r--r--arch/um/Kconfig1
-rw-r--r--arch/um/drivers/Kconfig12
-rw-r--r--arch/um/drivers/Makefile3
-rw-r--r--arch/um/drivers/random.c2
-rw-r--r--arch/um/drivers/rtc_user.c2
-rw-r--r--arch/um/drivers/ubd.h6
-rw-r--r--arch/um/drivers/ubd_kern.c25
-rw-r--r--arch/um/drivers/ubd_user.c14
-rw-r--r--arch/um/drivers/virt-pci.c699
-rw-r--r--arch/um/drivers/virt-pci.h41
-rw-r--r--arch/um/drivers/virtio_pcidev.c628
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/pgalloc.h21
-rw-r--r--arch/um/include/asm/processor-generic.h2
-rw-r--r--arch/um/include/asm/uaccess.h20
-rw-r--r--arch/um/include/linux/time-internal.h2
-rw-r--r--arch/um/include/shared/arch.h2
-rw-r--r--arch/um/include/shared/as-layout.h2
-rw-r--r--arch/um/include/shared/irq_user.h3
-rw-r--r--arch/um/include/shared/kern_util.h12
-rw-r--r--arch/um/include/shared/os.h8
-rw-r--r--arch/um/include/shared/sigio.h1
-rw-r--r--arch/um/kernel/Makefile2
-rw-r--r--arch/um/kernel/irq.c3
-rw-r--r--arch/um/kernel/maccess.c19
-rw-r--r--arch/um/kernel/mem.c11
-rw-r--r--arch/um/kernel/sigio.c26
-rw-r--r--arch/um/kernel/skas/syscall.c11
-rw-r--r--arch/um/kernel/trap.c28
-rw-r--r--arch/um/kernel/um_arch.c3
-rw-r--r--arch/um/os-Linux/helper.c67
-rw-r--r--arch/um/os-Linux/process.c51
-rw-r--r--arch/um/os-Linux/sigio.c352
-rw-r--r--arch/um/os-Linux/signal.c4
-rw-r--r--arch/um/os-Linux/skas/process.c8
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/Makefile.um7
-rw-r--r--arch/x86/coco/tdx/tdx.c34
-rw-r--r--arch/x86/entry/vdso/vma.c5
-rw-r--r--arch/x86/include/asm/arch_hweight.h6
-rw-r--r--arch/x86/include/asm/iosf_mbi.h7
-rw-r--r--arch/x86/include/asm/irqflags.h40
-rw-r--r--arch/x86/include/asm/paravirt.h20
-rw-r--r--arch/x86/include/asm/paravirt_types.h3
-rw-r--r--arch/x86/include/asm/smap.h23
-rw-r--r--arch/x86/include/asm/tdx.h4
-rw-r--r--arch/x86/include/asm/xen/hypercall.h6
-rw-r--r--arch/x86/kernel/paravirt.c14
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/lib/copy_user_64.S18
-rw-r--r--arch/x86/mm/init_64.c15
-rw-r--r--arch/x86/mm/pat/set_memory.c1
-rw-r--r--arch/x86/mm/pgtable.c8
-rw-r--r--arch/x86/platform/intel/iosf_mbi.c13
-rw-r--r--arch/x86/power/cpu.c14
-rw-r--r--arch/x86/tools/insn_decoder_test.c2
-rw-r--r--arch/x86/um/asm/barrier.h6
-rw-r--r--arch/x86/um/asm/module.h24
-rw-r--r--arch/x86/um/os-Linux/mcontext.c15
-rw-r--r--arch/x86/um/shared/sysdep/faultinfo_32.h12
-rw-r--r--arch/x86/um/shared/sysdep/faultinfo_64.h12
-rw-r--r--arch/x86/um/vdso/vma.c17
-rw-r--r--block/blk-mq.c29
-rw-r--r--crypto/testmgr.c157
-rw-r--r--drivers/acpi/acpi_pnp.c2
-rw-r--r--drivers/acpi/acpi_video.c9
-rw-r--r--drivers/acpi/apei/ghes.c103
-rw-r--r--drivers/acpi/nfit/core.c2
-rw-r--r--drivers/acpi/numa/hmat.c44
-rw-r--r--drivers/acpi/numa/srat.c22
-rw-r--r--drivers/acpi/platform_profile.c13
-rw-r--r--drivers/acpi/processor_idle.c4
-rw-r--r--drivers/acpi/resource.c7
-rw-r--r--drivers/acpi/x86/utils.c3
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/block/ublk_drv.c223
-rw-r--r--drivers/bus/fsl-mc/dpmcp.c22
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-allocator.c5
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-private.h6
-rw-r--r--drivers/bus/fsl-mc/mc-io.c20
-rw-r--r--drivers/clk/clkdev.c9
-rw-r--r--drivers/counter/microchip-tcb-capture.c19
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c24
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cxl/Kconfig4
-rw-r--r--drivers/cxl/core/Makefile3
-rw-r--r--drivers/cxl/core/acpi.c11
-rw-r--r--drivers/cxl/core/cdat.c102
-rw-r--r--drivers/cxl/core/core.h10
-rw-r--r--drivers/cxl/core/hdm.c382
-rw-r--r--drivers/cxl/core/mbox.c141
-rw-r--r--drivers/cxl/core/mce.c65
-rw-r--r--drivers/cxl/core/mce.h20
-rw-r--r--drivers/cxl/core/memdev.c83
-rw-r--r--drivers/cxl/core/pci.c97
-rw-r--r--drivers/cxl/core/port.c38
-rw-r--r--drivers/cxl/core/ras.c119
-rw-r--r--drivers/cxl/core/region.c336
-rw-r--r--drivers/cxl/core/trace.h81
-rw-r--r--drivers/cxl/cxl.h52
-rw-r--r--drivers/cxl/cxlmem.h77
-rw-r--r--drivers/cxl/cxlpci.h6
-rw-r--r--drivers/cxl/mem.c2
-rw-r--r--drivers/cxl/pci.c7
-rw-r--r--drivers/cxl/pmem.c81
-rw-r--r--drivers/firewire/core-cdev.c42
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c2
-rw-r--r--drivers/firmware/efi/cper.c6
-rw-r--r--drivers/firmware/efi/cper_cxl.c39
-rw-r--r--drivers/firmware/efi/cper_cxl.h66
-rw-r--r--drivers/gpu/drm/i915/i915_iosf_mbi.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c1
-rw-r--r--drivers/input/mouse/cyapa.c4
-rw-r--r--drivers/input/touchscreen/tsc2007.h2
-rw-r--r--drivers/input/touchscreen/tsc2007_core.c5
-rw-r--r--drivers/md/Kconfig1
-rw-r--r--drivers/md/dm-bufio.c4
-rw-r--r--drivers/md/dm-cache-target.c96
-rw-r--r--drivers/md/dm-crypt.c41
-rw-r--r--drivers/md/dm-delay.c18
-rw-r--r--drivers/md/dm-ebs-target.c7
-rw-r--r--drivers/md/dm-integrity.c48
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-table.c4
-rw-r--r--drivers/md/dm-vdo/block-map.c13
-rw-r--r--drivers/md/dm-vdo/constants.h3
-rw-r--r--drivers/md/dm-vdo/dedupe.c20
-rw-r--r--drivers/md/dm-vdo/encodings.c20
-rw-r--r--drivers/md/dm-vdo/indexer/index-layout.c5
-rw-r--r--drivers/md/dm-vdo/indexer/index-session.c6
-rw-r--r--drivers/md/dm-vdo/indexer/indexer.h53
-rw-r--r--drivers/md/dm-vdo/io-submitter.c6
-rw-r--r--drivers/md/dm-vdo/io-submitter.h18
-rw-r--r--drivers/md/dm-vdo/packer.h2
-rw-r--r--drivers/md/dm-vdo/priority-table.c2
-rw-r--r--drivers/md/dm-vdo/recovery-journal.h6
-rw-r--r--drivers/md/dm-vdo/slab-depot.c193
-rw-r--r--drivers/md/dm-vdo/slab-depot.h13
-rw-r--r--drivers/md/dm-vdo/types.h3
-rw-r--r--drivers/md/dm-vdo/vdo.c11
-rw-r--r--drivers/md/dm-vdo/vio.c54
-rw-r--r--drivers/md/dm-vdo/vio.h13
-rw-r--r--drivers/md/dm-vdo/wait-queue.c2
-rw-r--r--drivers/md/dm-verity-target.c62
-rw-r--r--drivers/md/dm.c8
-rw-r--r--drivers/media/dvb-frontends/dib8000.c5
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cxl/Kconfig28
-rw-r--r--drivers/misc/cxl/Makefile14
-rw-r--r--drivers/misc/cxl/api.c532
-rw-r--r--drivers/misc/cxl/base.c126
-rw-r--r--drivers/misc/cxl/context.c362
-rw-r--r--drivers/misc/cxl/cxl.h1135
-rw-r--r--drivers/misc/cxl/cxllib.c271
-rw-r--r--drivers/misc/cxl/debugfs.c134
-rw-r--r--drivers/misc/cxl/fault.c341
-rw-r--r--drivers/misc/cxl/file.c699
-rw-r--r--drivers/misc/cxl/flash.c538
-rw-r--r--drivers/misc/cxl/guest.c1208
-rw-r--r--drivers/misc/cxl/hcalls.c643
-rw-r--r--drivers/misc/cxl/hcalls.h200
-rw-r--r--drivers/misc/cxl/irq.c450
-rw-r--r--drivers/misc/cxl/main.c383
-rw-r--r--drivers/misc/cxl/native.c1592
-rw-r--r--drivers/misc/cxl/of.c346
-rw-r--r--drivers/misc/cxl/pci.c2103
-rw-r--r--drivers/misc/cxl/sysfs.c771
-rw-r--r--drivers/misc/cxl/trace.c9
-rw-r--r--drivers/misc/cxl/trace.h691
-rw-r--r--drivers/misc/cxl/vphb.c309
-rw-r--r--drivers/misc/lkdtm/perms.c14
-rw-r--r--drivers/net/caif/caif_serial.c2
-rw-r--r--drivers/nvdimm/claim.c11
-rw-r--r--drivers/nvdimm/label.c3
-rw-r--r--drivers/nvdimm/nd-core.h4
-rw-r--r--drivers/nvdimm/region_devs.c41
-rw-r--r--drivers/nvme/host/Kconfig13
-rw-r--r--drivers/nvme/host/core.c2
-rw-r--r--drivers/nvme/host/ioctl.c68
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/target/debugfs.c2
-rw-r--r--drivers/nvme/target/pci-epf.c63
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c4
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c35
-rw-r--r--drivers/pnp/isapnp/core.c1
-rw-r--r--drivers/regulator/rk808-regulator.c4
-rw-r--r--drivers/rtc/Kconfig7
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c24
-rw-r--r--drivers/rtc/rtc-ab8500.c11
-rw-r--r--drivers/rtc/rtc-aspeed.c16
-rw-r--r--drivers/rtc/rtc-cros-ec.c30
-rw-r--r--drivers/rtc/rtc-ds1307.c4
-rw-r--r--drivers/rtc/rtc-ds1343.c8
-rw-r--r--drivers/rtc/rtc-ds2404.c14
-rw-r--r--drivers/rtc/rtc-ds3232.c24
-rw-r--r--drivers/rtc/rtc-ep93xx.c16
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c2
-rw-r--r--drivers/rtc/rtc-ftrtc010.c17
-rw-r--r--drivers/rtc/rtc-m48t86.c14
-rw-r--r--drivers/rtc/rtc-max31335.c165
-rw-r--r--drivers/rtc/rtc-max77686.c37
-rw-r--r--drivers/rtc/rtc-meson-vrtc.c12
-rw-r--r--drivers/rtc/rtc-meson.c16
-rw-r--r--drivers/rtc/rtc-mpfs.c10
-rw-r--r--drivers/rtc/rtc-nxp-bbnsm.c29
-rw-r--r--drivers/rtc/rtc-pcf50633.c284
-rw-r--r--drivers/rtc/rtc-pcf85063.c25
-rw-r--r--drivers/rtc/rtc-pl030.c16
-rw-r--r--drivers/rtc/rtc-pl031.c8
-rw-r--r--drivers/rtc/rtc-pm8xxx.c220
-rw-r--r--drivers/rtc/rtc-renesas-rtca3.c15
-rw-r--r--drivers/rtc/rtc-rv3032.c8
-rw-r--r--drivers/rtc/rtc-rx8581.c85
-rw-r--r--drivers/rtc/rtc-rzn1.c108
-rw-r--r--drivers/rtc/rtc-s35390a.c22
-rw-r--r--drivers/rtc/rtc-s5m.c58
-rw-r--r--drivers/rtc/rtc-sd2405al.c16
-rw-r--r--drivers/rtc/rtc-sd3078.c71
-rw-r--r--drivers/rtc/rtc-stm32.c10
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c4
-rw-r--r--drivers/spi/Kconfig4
-rw-r--r--drivers/spi/spi-amd.c2
-rw-r--r--drivers/spi/spi-bcm2835.c18
-rw-r--r--drivers/spi/spi-cadence-quadspi.c2
-rw-r--r--drivers/spi/spi-cadence-xspi.c2
-rw-r--r--drivers/spi/spi-fsl-qspi.c31
-rw-r--r--drivers/spi/spi-qpic-snand.c2
-rw-r--r--drivers/spi/spi-rockchip.c2
-rw-r--r--drivers/staging/gpib/Kconfig5
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.c260
-rw-r--r--drivers/staging/gpib/agilent_82350b/agilent_82350b.h50
-rw-r--r--drivers/staging/gpib/agilent_82357a/agilent_82357a.c527
-rw-r--r--drivers/staging/gpib/cb7210/Makefile1
-rw-r--r--drivers/staging/gpib/cb7210/cb7210.c287
-rw-r--r--drivers/staging/gpib/cb7210/cb7210.h45
-rw-r--r--drivers/staging/gpib/cec/cec.h29
-rw-r--r--drivers/staging/gpib/cec/cec_gpib.c96
-rw-r--r--drivers/staging/gpib/common/gpib_os.c328
-rw-r--r--drivers/staging/gpib/common/iblib.c167
-rw-r--r--drivers/staging/gpib/common/ibsys.h14
-rw-r--r--drivers/staging/gpib/eastwood/fluke_gpib.c204
-rw-r--r--drivers/staging/gpib/fmh_gpib/fmh_gpib.c280
-rw-r--r--drivers/staging/gpib/gpio/gpib_bitbang.c138
-rw-r--r--drivers/staging/gpib/hp_82335/hp82335.c83
-rw-r--r--drivers/staging/gpib/hp_82335/hp82335.h30
-rw-r--r--drivers/staging/gpib/hp_82341/hp_82341.c161
-rw-r--r--drivers/staging/gpib/hp_82341/hp_82341.h40
-rw-r--r--drivers/staging/gpib/include/gpibP.h10
-rw-r--r--drivers/staging/gpib/include/gpib_proto.h58
-rw-r--r--drivers/staging/gpib/include/gpib_types.h58
-rw-r--r--drivers/staging/gpib/include/nec7210.h60
-rw-r--r--drivers/staging/gpib/include/tms9914.h52
-rw-r--r--drivers/staging/gpib/ines/Makefile1
-rw-r--r--drivers/staging/gpib/ines/ines.h54
-rw-r--r--drivers/staging/gpib/ines/ines_gpib.c240
-rw-r--r--drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c363
-rw-r--r--drivers/staging/gpib/nec7210/nec7210.c147
-rw-r--r--drivers/staging/gpib/ni_usb/ni_usb_gpib.c611
-rw-r--r--drivers/staging/gpib/pc2/pc2_gpib.c352
-rw-r--r--drivers/staging/gpib/tms9914/tms9914.c117
-rw-r--r--drivers/staging/gpib/tnt4882/Makefile1
-rw-r--r--drivers/staging/gpib/tnt4882/mite.c17
-rw-r--r--drivers/staging/gpib/tnt4882/tnt4882_gpib.c964
-rw-r--r--drivers/staging/gpib/uapi/gpib_user.h29
-rw-r--r--drivers/staging/greybus/uart.c4
-rw-r--r--drivers/staging/rtl8723bs/Kconfig1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c96
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c3
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_intf.h27
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h1
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_io.h92
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mp.h341
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c127
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c65
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c14
-rw-r--r--drivers/thunderbolt/retimer.c8
-rw-r--r--drivers/thunderbolt/tb.c16
-rw-r--r--drivers/thunderbolt/tunnel.c16
-rw-r--r--drivers/tty/Kconfig2
-rw-r--r--drivers/tty/moxa.c251
-rw-r--r--drivers/tty/n_tty.c212
-rw-r--r--drivers/tty/serdev/core.c11
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c9
-rw-r--r--drivers/tty/serial/8250/8250_dma.c2
-rw-r--r--drivers/tty/serial/8250/8250_dw.c73
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c8
-rw-r--r--drivers/tty/serial/8250/8250_ni.c461
-rw-r--r--drivers/tty/serial/8250/8250_omap.c2
-rw-r--r--drivers/tty/serial/8250/8250_pci.c46
-rw-r--r--drivers/tty/serial/8250/8250_port.c61
-rw-r--r--drivers/tty/serial/8250/8250_rsa.c21
-rw-r--r--drivers/tty/serial/8250/Kconfig13
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig42
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/amba-pl011.c149
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c489
-rw-r--r--drivers/tty/serial/icom.c9
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/kgdb_nmi.c280
-rw-r--r--drivers/tty/serial/kgdboc.c8
-rw-r--r--drivers/tty/serial/ma35d1_serial.c2
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c1
-rw-r--r--drivers/tty/serial/pch_uart.c1
-rw-r--r--drivers/tty/serial/serial_core.c10
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c62
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.h27
-rw-r--r--drivers/tty/serial/sh-sci.c98
-rw-r--r--drivers/tty/serial/stm32-usart.c6
-rw-r--r--drivers/tty/serial/sunsu.c178
-rw-r--r--drivers/tty/serial/tegra-utc.c625
-rw-r--r--drivers/tty/tty_audit.c10
-rw-r--r--drivers/tty/tty_io.c8
-rw-r--r--drivers/tty/tty_ldsem.c17
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c4
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c107
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c2
-rw-r--r--drivers/usb/cdns3/core.c5
-rw-r--r--drivers/usb/cdns3/core.h2
-rw-r--r--drivers/usb/cdns3/host.c11
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c10
-rw-r--r--drivers/usb/common/usb-conn-gpio.c2
-rw-r--r--drivers/usb/core/config.c51
-rw-r--r--drivers/usb/core/hcd.c4
-rw-r--r--drivers/usb/core/hub.c4
-rw-r--r--drivers/usb/core/urb.c2
-rw-r--r--drivers/usb/dwc2/core.c1
-rw-r--r--drivers/usb/dwc2/core.h23
-rw-r--r--drivers/usb/dwc2/gadget.c116
-rw-r--r--drivers/usb/dwc2/hcd.c99
-rw-r--r--drivers/usb/dwc2/platform.c38
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c12
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c9
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c10
-rw-r--r--drivers/usb/dwc3/dwc3-st.c2
-rw-r--r--drivers/usb/dwc3/gadget.c69
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/dev.c3
-rw-r--r--drivers/usb/host/max3421-hcd.c7
-rw-r--r--drivers/usb/host/xhci-histb.c2
-rw-r--r--drivers/usb/host/xhci-mem.c34
-rw-r--r--drivers/usb/host/xhci-mvebu.c10
-rw-r--r--drivers/usb/host/xhci-mvebu.h6
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-plat.c13
-rw-r--r--drivers/usb/host/xhci-plat.h1
-rw-r--r--drivers/usb/host/xhci-ring.c420
-rw-r--r--drivers/usb/host/xhci-tegra.c10
-rw-r--r--drivers/usb/host/xhci.c41
-rw-r--r--drivers/usb/host/xhci.h30
-rw-r--r--drivers/usb/misc/onboard_usb_dev.h9
-rw-r--r--drivers/usb/misc/usb251xb.c6
-rw-r--r--drivers/usb/musb/jz4740.c4
-rw-r--r--drivers/usb/musb/mediatek.c2
-rw-r--r--drivers/usb/musb/mpfs.c2
-rw-r--r--drivers/usb/musb/musb_core.c14
-rw-r--r--drivers/usb/musb/sunxi.c4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c8
-rw-r--r--drivers/usb/phy/phy-ulpi.c23
-rw-r--r--drivers/usb/serial/mos7840.c13
-rw-r--r--drivers/usb/storage/alauda.c8
-rw-r--r--drivers/usb/storage/datafab.c14
-rw-r--r--drivers/usb/storage/initializers.c2
-rw-r--r--drivers/usb/storage/jumpshot.c10
-rw-r--r--drivers/usb/storage/realtek_cr.c6
-rw-r--r--drivers/usb/storage/sddr09.c14
-rw-r--r--drivers/usb/storage/sddr55.c4
-rw-r--r--drivers/usb/storage/shuttle_usbat.c2
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/typec/altmodes/thunderbolt.c10
-rw-r--r--drivers/usb/typec/mux/Kconfig10
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/ps883x.c466
-rw-r--r--drivers/usb/typec/ucsi/cros_ec_ucsi.c22
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c6
-rw-r--r--drivers/usb/typec/ucsi/trace.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c19
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h10
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c29
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c97
-rw-r--r--fs/9p/vfs_inode_dotl.c2
-rw-r--r--fs/bcachefs/Kconfig1
-rw-r--r--fs/bcachefs/acl.c4
-rw-r--r--fs/bcachefs/alloc_background.c95
-rw-r--r--fs/bcachefs/alloc_background.h6
-rw-r--r--fs/bcachefs/alloc_foreground.c79
-rw-r--r--fs/bcachefs/backpointers.c24
-rw-r--r--fs/bcachefs/bcachefs.h7
-rw-r--r--fs/bcachefs/btree_gc.c4
-rw-r--r--fs/bcachefs/btree_io.c17
-rw-r--r--fs/bcachefs/btree_iter.c188
-rw-r--r--fs/bcachefs/btree_iter.h122
-rw-r--r--fs/bcachefs/btree_key_cache.c32
-rw-r--r--fs/bcachefs/btree_node_scan.c8
-rw-r--r--fs/bcachefs/btree_types.h1
-rw-r--r--fs/bcachefs/btree_update.c26
-rw-r--r--fs/bcachefs/btree_update_interior.c12
-rw-r--r--fs/bcachefs/btree_write_buffer.c10
-rw-r--r--fs/bcachefs/buckets.c16
-rw-r--r--fs/bcachefs/buckets.h21
-rw-r--r--fs/bcachefs/buckets_types.h5
-rw-r--r--fs/bcachefs/chardev.c14
-rw-r--r--fs/bcachefs/compress.c5
-rw-r--r--fs/bcachefs/data_update.c8
-rw-r--r--fs/bcachefs/debug.c4
-rw-r--r--fs/bcachefs/dirent.c16
-rw-r--r--fs/bcachefs/disk_accounting.c4
-rw-r--r--fs/bcachefs/disk_groups.c4
-rw-r--r--fs/bcachefs/ec.c18
-rw-r--r--fs/bcachefs/error.c7
-rw-r--r--fs/bcachefs/extent_update.c6
-rw-r--r--fs/bcachefs/fs-io-buffered.c6
-rw-r--r--fs/bcachefs/fs-io.c14
-rw-r--r--fs/bcachefs/fs.c24
-rw-r--r--fs/bcachefs/fsck.c30
-rw-r--r--fs/bcachefs/inode.c18
-rw-r--r--fs/bcachefs/io_misc.c18
-rw-r--r--fs/bcachefs/io_read.c14
-rw-r--r--fs/bcachefs/io_write.c40
-rw-r--r--fs/bcachefs/journal.c14
-rw-r--r--fs/bcachefs/journal_io.c8
-rw-r--r--fs/bcachefs/migrate.c4
-rw-r--r--fs/bcachefs/move.c14
-rw-r--r--fs/bcachefs/movinggc.c8
-rw-r--r--fs/bcachefs/namei.c38
-rw-r--r--fs/bcachefs/quota.c2
-rw-r--r--fs/bcachefs/rebalance.c12
-rw-r--r--fs/bcachefs/recovery.c6
-rw-r--r--fs/bcachefs/reflink.c23
-rw-r--r--fs/bcachefs/sb-members.h23
-rw-r--r--fs/bcachefs/snapshot.c13
-rw-r--r--fs/bcachefs/str_hash.c2
-rw-r--r--fs/bcachefs/str_hash.h8
-rw-r--r--fs/bcachefs/subvolume.c4
-rw-r--r--fs/bcachefs/subvolume.h14
-rw-r--r--fs/bcachefs/super-io.c21
-rw-r--r--fs/bcachefs/super.c85
-rw-r--r--fs/bcachefs/tests.c30
-rw-r--r--fs/bcachefs/xattr.c2
-rw-r--r--fs/cachefiles/namei.c7
-rw-r--r--fs/exec.c15
-rw-r--r--fs/exportfs/expfs.c1
-rw-r--r--fs/fuse/dev.c162
-rw-r--r--fs/fuse/dev_uring.c43
-rw-r--r--fs/fuse/dev_uring_i.h18
-rw-r--r--fs/fuse/dir.c11
-rw-r--r--fs/fuse/fuse_dev_i.h4
-rw-r--r--fs/fuse/fuse_i.h47
-rw-r--r--fs/fuse/inode.c51
-rw-r--r--fs/fuse/sysctl.c24
-rw-r--r--fs/hostfs/hostfs.h2
-rw-r--r--fs/hostfs/hostfs_kern.c7
-rw-r--r--fs/hostfs/hostfs_user.c59
-rw-r--r--fs/namespace.c7
-rw-r--r--fs/nfs/client.c5
-rw-r--r--fs/nfs/delegation.c66
-rw-r--r--fs/nfs/dir.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c24
-rw-r--r--fs/nfs/fs_context.c71
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h5
-rw-r--r--fs/nfs/nfs3client.c2
-rw-r--r--fs/nfs/nfs3proc.c2
-rw-r--r--fs/nfs/nfs42proc.c172
-rw-r--r--fs/nfs/nfs42xdr.c86
-rw-r--r--fs/nfs/nfs4client.c7
-rw-r--r--fs/nfs/nfs4proc.c17
-rw-r--r--fs/nfs/nfs4state.c14
-rw-r--r--fs/nfs/nfs4trace.h11
-rw-r--r--fs/nfs/nfs4xdr.c19
-rw-r--r--fs/nfs/super.c4
-rw-r--r--fs/nfs/sysfs.c82
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/ntfs3/attrib.c3
-rw-r--r--fs/ntfs3/file.c42
-rw-r--r--fs/ntfs3/frecord.c63
-rw-r--r--fs/ntfs3/fsntfs.c28
-rw-r--r--fs/ntfs3/index.c4
-rw-r--r--fs/ntfs3/inode.c40
-rw-r--r--fs/ntfs3/ntfs.h2
-rw-r--r--fs/ntfs3/ntfs_fs.h6
-rw-r--r--fs/ntfs3/super.c89
-rw-r--r--fs/smb/server/auth.c4
-rw-r--r--fs/smb/server/connection.h11
-rw-r--r--fs/smb/server/mgmt/user_session.c18
-rw-r--r--fs/smb/server/smb2pdu.c21
-rw-r--r--fs/smb/server/smbacl.c21
-rw-r--r--fs/userfaultfd.c51
-rw-r--r--include/asm-generic/tlb.h14
-rw-r--r--include/cxl/event.h101
-rw-r--r--include/linux/acpi.h11
-rw-r--r--include/linux/bvec.h6
-rw-r--r--include/linux/context_tracking_irq.h8
-rw-r--r--include/linux/cper.h8
-rw-r--r--include/linux/fsl/mc.h2
-rw-r--r--include/linux/io_uring/cmd.h1
-rw-r--r--include/linux/iomap.h15
-rw-r--r--include/linux/kdb.h2
-rw-r--r--include/linux/kgdb.h11
-rw-r--r--include/linux/linkage.h4
-rw-r--r--include/linux/mfd/mt6397/rtc.h5
-rw-r--r--include/linux/mm.h10
-rw-r--r--include/linux/nfs4.h2
-rw-r--r--include/linux/nfs_fs_sb.h8
-rw-r--r--include/linux/nfs_xdr.h5
-rw-r--r--include/linux/node.h7
-rw-r--r--include/linux/objtool.h2
-rw-r--r--include/linux/page-flags.h37
-rw-r--r--include/linux/page_ref.h2
-rw-r--r--include/linux/platform_data/cros_ec_commands.h1
-rw-r--r--include/linux/rcupdate.h2
-rw-r--r--include/linux/rtc.h1
-rw-r--r--include/linux/sched/smt.h2
-rw-r--r--include/linux/seq_buf.h4
-rw-r--r--include/linux/seq_file.h1
-rw-r--r--include/linux/serdev.h6
-rw-r--r--include/linux/sort.h11
-rw-r--r--include/linux/string.h4
-rw-r--r--include/linux/sunrpc/clnt.h5
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/sunrpc/xprtmultipath.h1
-rw-r--r--include/linux/trace.h4
-rw-r--r--include/linux/trace_seq.h8
-rw-r--r--include/linux/tty.h53
-rw-r--r--include/linux/tty_driver.h180
-rw-r--r--include/linux/tty_ldisc.h1
-rw-r--r--include/linux/usb.h8
-rw-r--r--include/linux/usb/musb.h2
-rw-r--r--include/linux/usb/ulpi.h9
-rw-r--r--include/misc/cxl-base.h48
-rw-r--r--include/misc/cxl.h265
-rw-r--r--include/misc/cxllib.h129
-rw-r--r--include/trace/events/sunrpc.h1
-rw-r--r--include/uapi/linux/fuse.h12
-rw-r--r--include/uapi/linux/ublk_cmd.h25
-rw-r--r--include/uapi/linux/usb/ch9.h15
-rw-r--r--include/uapi/misc/cxl.h156
-rw-r--r--init/Kconfig27
-rw-r--r--io_uring/Kconfig1
-rw-r--r--io_uring/io_uring.c18
-rw-r--r--io_uring/io_uring.h3
-rw-r--r--io_uring/msg_ring.c11
-rw-r--r--io_uring/net.c135
-rw-r--r--io_uring/refs.h7
-rw-r--r--io_uring/rsrc.c126
-rw-r--r--io_uring/uring_cmd.c22
-rw-r--r--io_uring/uring_cmd.h1
-rw-r--r--io_uring/zcrx.c8
-rw-r--r--kernel/debug/debug_core.c14
-rw-r--r--kernel/debug/kdb/kdb_io.c4
-rw-r--r--kernel/debug/kdb/kdb_main.c85
-rw-r--r--kernel/events/uprobes.c3
-rw-r--r--kernel/exit.c11
-rw-r--r--kernel/panic.c6
-rw-r--r--kernel/rcu/Kconfig2
-rw-r--r--kernel/sched/ext.c8
-rw-r--r--kernel/sched/ext_idle.c12
-rw-r--r--kernel/signal.c8
-rw-r--r--kernel/trace/Kconfig3
-rw-r--r--kernel/trace/ftrace.c1
-rw-r--r--kernel/trace/ring_buffer.c5
-rw-r--r--kernel/trace/rv/rv.c3
-rw-r--r--kernel/trace/trace.c78
-rw-r--r--kernel/trace/trace.h17
-rw-r--r--kernel/trace/trace_events.c7
-rw-r--r--lib/sg_split.c2
-rw-r--r--lib/sort.c110
-rw-r--r--lib/vdso/datastore.c3
-rw-r--r--lib/vsprintf.c9
-rw-r--r--mm/damon/core.c9
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/kasan/kasan_test_c.c5
-rw-r--r--mm/memblock.c3
-rw-r--r--mm/memory_hotplug.c12
-rw-r--r--mm/mm_init.c12
-rw-r--r--mm/mremap.c3
-rw-r--r--mm/page_alloc.c9
-rw-r--r--mm/page_isolation.c9
-rw-r--r--mm/zswap.c30
-rw-r--r--net/9p/client.c44
-rw-r--r--net/9p/error.c21
-rw-r--r--net/9p/trans_fd.c73
-rw-r--r--net/sunrpc/clnt.c33
-rw-r--r--net/sunrpc/rpcb_clnt.c5
-rw-r--r--net/sunrpc/sched.c2
-rw-r--r--net/sunrpc/sysfs.c202
-rw-r--r--net/sunrpc/xprtmultipath.c21
-rw-r--r--net/xdp/xsk.c5
-rw-r--r--rust/Makefile12
-rw-r--r--rust/compiler_builtins.rs24
-rw-r--r--samples/trace_events/trace-events-sample.h8
-rw-r--r--scripts/Makefile.lib4
-rw-r--r--scripts/Makefile.vmlinux_o15
-rw-r--r--scripts/generate_rust_target.rs4
-rw-r--r--scripts/sorttable.c2
-rw-r--r--security/Kconfig21
-rw-r--r--sound/hda/intel-sdw-acpi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c65
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c30
-rw-r--r--sound/soc/codecs/rt5665.c24
-rw-r--r--sound/soc/codecs/sma1307.c11
-rw-r--r--sound/soc/codecs/wcd934x.c2
-rw-r--r--sound/soc/codecs/wsa883x.c2
-rw-r--r--sound/soc/codecs/wsa884x.c2
-rw-r--r--sound/soc/fsl/imx-card.c4
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-dai.c60
-rw-r--r--sound/soc/qcom/qdsp6/q6apm.c18
-rw-r--r--sound/soc/qcom/qdsp6/q6apm.h3
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c19
-rw-r--r--sound/soc/sof/intel/hda-dsp.c8
-rw-r--r--sound/soc/sof/intel/hda.c4
-rw-r--r--sound/soc/sof/intel/hda.h8
-rw-r--r--sound/soc/sof/intel/ptl.c33
-rw-r--r--tools/objtool/Documentation/objtool.txt10
-rw-r--r--tools/objtool/arch/loongarch/decode.c14
-rw-r--r--tools/objtool/arch/loongarch/orc.c8
-rw-r--r--tools/objtool/arch/x86/decode.c15
-rw-r--r--tools/objtool/arch/x86/orc.c6
-rw-r--r--tools/objtool/arch/x86/special.c38
-rw-r--r--tools/objtool/builtin-check.c132
-rw-r--r--tools/objtool/check.c647
-rw-r--r--tools/objtool/elf.c156
-rw-r--r--tools/objtool/include/objtool/builtin.h6
-rw-r--r--tools/objtool/include/objtool/check.h3
-rw-r--r--tools/objtool/include/objtool/elf.h30
-rw-r--r--tools/objtool/include/objtool/objtool.h2
-rw-r--r--tools/objtool/include/objtool/special.h4
-rw-r--r--tools/objtool/include/objtool/warn.h62
-rw-r--r--tools/objtool/objtool.c15
-rw-r--r--tools/objtool/orc_dump.c30
-rw-r--r--tools/objtool/special.c25
-rw-r--r--tools/sched_ext/include/scx/common.bpf.h85
-rw-r--r--tools/sched_ext/include/scx/enum_defs.autogen.h3
-rw-r--r--tools/sched_ext/include/scx/enums.autogen.bpf.h24
-rw-r--r--tools/sched_ext/include/scx/enums.autogen.h8
-rw-r--r--tools/sched_ext/include/scx/enums.h3
-rw-r--r--tools/testing/cxl/Kbuild3
-rw-r--r--tools/testing/cxl/test/cxl.c32
-rw-r--r--tools/testing/cxl/test/mem.c32
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs_extable.c6
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c6
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_private_stack.c6
-rw-r--r--tools/testing/selftests/clone3/clone3_selftests.h2
-rwxr-xr-xtools/testing/selftests/mm/va_high_addr_switch.sh28
-rw-r--r--tools/testing/selftests/mseal_system_mappings/.gitignore2
-rw-r--r--tools/testing/selftests/mseal_system_mappings/Makefile6
-rw-r--r--tools/testing/selftests/mseal_system_mappings/config1
-rw-r--r--tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c119
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h8
-rw-r--r--tools/testing/selftests/rtc/.gitignore1
-rw-r--r--tools/testing/selftests/rtc/Makefile2
-rw-r--r--tools/testing/selftests/rtc/rtctest.c19
-rw-r--r--tools/testing/selftests/rtc/setdate.c77
-rw-r--r--tools/testing/selftests/ublk/Makefile5
-rw-r--r--tools/testing/selftests/ublk/kublk.c8
-rw-r--r--tools/testing/selftests/ublk/kublk.h4
-rw-r--r--tools/testing/selftests/ublk/null.c11
-rw-r--r--tools/testing/selftests/ublk/stripe.c69
-rwxr-xr-xtools/testing/selftests/ublk/test_common.sh6
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_02.sh44
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_03.sh28
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_01.sh14
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_03.sh14
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_05.sh28
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_01.sh6
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_02.sh6
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_01.sh14
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_03.sh30
-rw-r--r--tools/testing/selftests/x86/test_mremap_vdso.c43
783 files changed, 15724 insertions, 26062 deletions
diff --git a/.mailmap b/.mailmap
index c85576e4695f..4f7cd8e23177 100644
--- a/.mailmap
+++ b/.mailmap
@@ -549,6 +549,8 @@ Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
Nicolas Saenz Julienne <nsaenz@kernel.org> <nsaenzjulienne@suse.de>
Nicolas Saenz Julienne <nsaenz@kernel.org> <nsaenzjulienne@suse.com>
+Nicolas Schier <nicolas.schier@linux.dev> <n.schier@avm.de>
+Nicolas Schier <nicolas.schier@linux.dev> <nicolas@fjasle.eu>
Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Nikolay Aleksandrov <razor@blackwall.org> <naleksan@redhat.com>
Nikolay Aleksandrov <razor@blackwall.org> <nikolay@redhat.com>
diff --git a/CREDITS b/CREDITS
index 0dabce0f03f0..1b77fba6c27e 100644
--- a/CREDITS
+++ b/CREDITS
@@ -317,6 +317,10 @@ S: Code 930.5, Goddard Space Flight Center
S: Greenbelt, Maryland 20771
S: USA
+N: Joel Becker
+E: jlbec@evilplan.org
+D: configfs
+
N: Adam Belay
E: ambx1@neo.rr.com
D: Linux Plug and Play Support
diff --git a/Documentation/ABI/obsolete/sysfs-class-cxl b/Documentation/ABI/removed/sysfs-class-cxl
index 8cba1b626985..266c413b96e8 100644
--- a/Documentation/ABI/obsolete/sysfs-class-cxl
+++ b/Documentation/ABI/removed/sysfs-class-cxl
@@ -1,5 +1,4 @@
-The cxl driver is no longer maintained, and will be removed from the kernel in
-the near future.
+The cxl driver was removed in 6.15.
Please note that attributes that are shared between devices are stored in
the directory pointed to by the symlink device/.
@@ -10,7 +9,7 @@ For example, the real path of the attribute /sys/class/cxl/afu0.0s/irqs_max is
Slave contexts (eg. /sys/class/cxl/afu0.0s):
What: /sys/class/cxl/<afu>/afu_err_buf
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
AFU Error Buffer contents. The contents of this file are
@@ -21,7 +20,7 @@ Description: read only
What: /sys/class/cxl/<afu>/irqs_max
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
Decimal value of maximum number of interrupts that can be
@@ -32,7 +31,7 @@ Description: read/write
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/irqs_min
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the minimum number of interrupts that
@@ -42,7 +41,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/mmio_size
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the size of the MMIO space that may be mmapped
@@ -50,7 +49,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/modes_supported
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
List of the modes this AFU supports. One per line.
@@ -58,7 +57,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/mode
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
The current mode the AFU is using. Will be one of the modes
@@ -68,7 +67,7 @@ Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/prefault_mode
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
Set the mode for prefaulting in segments into the segment table
@@ -88,7 +87,7 @@ Description: read/write
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/reset
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: write only
Writing 1 here will reset the AFU provided there are not
@@ -96,14 +95,14 @@ Description: write only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/api_version
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the current version of the kernel/user API.
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/api_version_compatible
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the lowest version of the userspace API
@@ -117,7 +116,7 @@ An AFU may optionally export one or more PCIe like configuration records, known
as AFU configuration records, which will show up here (if present).
What: /sys/class/cxl/<afu>/cr<config num>/vendor
-Date: February 2015
+Date: February 2015, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Hexadecimal value of the vendor ID found in this AFU
@@ -125,7 +124,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/cr<config num>/device
-Date: February 2015
+Date: February 2015, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Hexadecimal value of the device ID found in this AFU
@@ -133,7 +132,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/cr<config num>/class
-Date: February 2015
+Date: February 2015, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Hexadecimal value of the class code found in this AFU
@@ -141,7 +140,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>/cr<config num>/config
-Date: February 2015
+Date: February 2015, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
This binary file provides raw access to the AFU configuration
@@ -155,7 +154,7 @@ Users: https://github.com/ibm-capi/libcxl
Master contexts (eg. /sys/class/cxl/afu0.0m)
What: /sys/class/cxl/<afu>m/mmio_size
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the size of the MMIO space that may be mmapped
@@ -163,14 +162,14 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>m/pp_mmio_len
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Decimal value of the Per Process MMIO space length.
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<afu>m/pp_mmio_off
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
(not in a guest)
@@ -181,21 +180,21 @@ Users: https://github.com/ibm-capi/libcxl
Card info (eg. /sys/class/cxl/card0)
What: /sys/class/cxl/<card>/caia_version
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Identifies the CAIA Version the card implements.
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/psl_revision
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Identifies the revision level of the PSL.
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/base_image
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
(not in a guest)
@@ -206,7 +205,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/image_loaded
-Date: September 2014
+Date: September 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
(not in a guest)
@@ -215,7 +214,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/load_image_on_perst
-Date: December 2014
+Date: December 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
(not in a guest)
@@ -232,7 +231,7 @@ Description: read/write
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/reset
-Date: October 2014
+Date: October 2014, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: write only
Writing 1 will issue a PERST to card provided there are no
@@ -243,7 +242,7 @@ Description: write only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/perst_reloads_same_image
-Date: July 2015
+Date: July 2015, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
(not in a guest)
@@ -257,7 +256,7 @@ Description: read/write
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/psl_timebase_synced
-Date: March 2016
+Date: March 2016, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Returns 1 if the psl timebase register is synchronized
@@ -265,7 +264,7 @@ Description: read only
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/tunneled_ops_supported
-Date: May 2018
+Date: May 2018, removed February 2025
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Returns 1 if tunneled operations are supported in capi mode,
diff --git a/Documentation/ABI/stable/sysfs-devices-node b/Documentation/ABI/stable/sysfs-devices-node
index 402af4b2b905..a02707cb7cbc 100644
--- a/Documentation/ABI/stable/sysfs-devices-node
+++ b/Documentation/ABI/stable/sysfs-devices-node
@@ -177,6 +177,12 @@ Description:
The cache write policy: 0 for write-back, 1 for write-through,
other or unknown.
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/address_mode
+Date: March 2025
+Contact: Dave Jiang <dave.jiang@intel.com>
+Description:
+ The address mode: 0 for reserved, 1 for extended-linear.
+
What: /sys/devices/system/node/nodeX/x86/sgx_total_bytes
Date: November 2021
Contact: Jarkko Sakkinen <jarkko@kernel.org>
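The address_mode attribute added above is a plain numeric sysfs file, so userspace can branch on it directly. A minimal C sketch under the stated encoding (0 reserved, 1 extended-linear); the node0/index1 instance below is only a placeholder::

    #include <stdio.h>

    /*
     * Minimal sketch: report the address mode of one memory-side cache
     * as described above (0 = reserved, 1 = extended-linear). The node
     * and index numbers are placeholders for whatever the system
     * actually enumerates.
     */
    int main(void)
    {
        const char *path =
            "/sys/devices/system/node/node0/memory_side_cache/index1/address_mode";
        FILE *f = fopen(path, "r");
        int mode;

        if (!f) {
            perror(path);
            return 1;
        }
        if (fscanf(f, "%d", &mode) != 1) {
            fclose(f);
            return 1;
        }
        printf("address mode: %s\n",
               mode == 1 ? "extended-linear" : "reserved");
        fclose(f);
        return 0;
    }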
diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl
index 3f5627a1210a..99bb3faf7a0e 100644
--- a/Documentation/ABI/testing/sysfs-bus-cxl
+++ b/Documentation/ABI/testing/sysfs-bus-cxl
@@ -1,5 +1,5 @@
What: /sys/bus/cxl/flush
-Date: Januarry, 2022
+Date: January, 2022
KernelVersion: v5.18
Contact: linux-cxl@vger.kernel.org
Description:
@@ -18,6 +18,24 @@ Description:
specification.
+What: /sys/bus/cxl/devices/memX/payload_max
+Date: December, 2020
+KernelVersion: v5.12
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) Maximum size (in bytes) of the mailbox command payload
+ registers. Linux caps this at 1MB if the device reports a
+ larger size.
+
+
+What: /sys/bus/cxl/devices/memX/label_storage_size
+Date: May, 2021
+KernelVersion: v5.13
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) Size (in bytes) of the Label Storage Area (LSA).
+
+
What: /sys/bus/cxl/devices/memX/ram/size
Date: December, 2020
KernelVersion: v5.12
@@ -33,7 +51,7 @@ Date: May, 2023
KernelVersion: v6.8
Contact: linux-cxl@vger.kernel.org
Description:
- (RO) For CXL host platforms that support "QoS Telemmetry"
+ (RO) For CXL host platforms that support "QoS Telemetry"
this attribute conveys a comma delimited list of platform
specific cookies that identifies a QoS performance class
for the volatile partition of the CXL mem device. These
@@ -60,7 +78,7 @@ Date: May, 2023
KernelVersion: v6.8
Contact: linux-cxl@vger.kernel.org
Description:
- (RO) For CXL host platforms that support "QoS Telemmetry"
+ (RO) For CXL host platforms that support "QoS Telemetry"
this attribute conveys a comma delimited list of platform
specific cookies that identifies a QoS performance class
for the persistent partition of the CXL mem device. These
@@ -321,14 +339,13 @@ KernelVersion: v6.0
Contact: linux-cxl@vger.kernel.org
Description:
(RW) When a CXL decoder is of devtype "cxl_decoder_endpoint" it
- translates from a host physical address range, to a device local
- address range. Device-local address ranges are further split
- into a 'ram' (volatile memory) range and 'pmem' (persistent
- memory) range. The 'mode' attribute emits one of 'ram', 'pmem',
- 'mixed', or 'none'. The 'mixed' indication is for error cases
- when a decoder straddles the volatile/persistent partition
- boundary, and 'none' indicates the decoder is not actively
- decoding, or no DPA allocation policy has been set.
+ translates from a host physical address range, to a device
+ local address range. Device-local address ranges are further
+ split into a 'ram' (volatile memory) range and 'pmem'
+ (persistent memory) range. The 'mode' attribute emits one of
+ 'ram', 'pmem', or 'none'. The 'none' indicates the decoder is
+ not actively decoding, or no DPA allocation policy has been
+ set.
'mode' can be written, when the decoder is in the 'disabled'
state, with either 'ram' or 'pmem' to set the boundaries for the
@@ -423,7 +440,7 @@ Date: May, 2023
KernelVersion: v6.5
Contact: linux-cxl@vger.kernel.org
Description:
- (RO) For CXL host platforms that support "QoS Telemmetry" this
+ (RO) For CXL host platforms that support "QoS Telemetry" this
root-decoder-only attribute conveys a platform specific cookie
that identifies a QoS performance class for the CXL Window.
This class-id can be compared against a similar "qos_class"
@@ -586,3 +603,15 @@ Description:
See Documentation/ABI/stable/sysfs-devices-node. access0 provides
the number to the closest initiator and access1 provides the
number to the closest CPU.
+
+
+What: /sys/bus/cxl/devices/nvdimm-bridge0/ndbusX/nmemY/cxl/dirty_shutdown
+Date: Feb, 2025
+KernelVersion: v6.15
+Contact: linux-cxl@vger.kernel.org
+Description:
+ (RO) The device dirty shutdown count value, which is the number
+ of times the device could have incurred in potential data loss.
+ The count is persistent across power loss and wraps back to 0
+ upon overflow. If this file is not present, the device does not
+ have the necessary support for dirty tracking.
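As a rough illustration of consuming the new attribute, a monitoring tool might read the dirty shutdown count like this (a minimal C sketch; the ndbus0/nmem0 instance names are placeholders, and a missing file simply means the device lacks dirty-shutdown tracking)::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /*
     * Minimal sketch: read the dirty shutdown count described above.
     * The bridge/bus/nmem instance numbers are placeholders; if the
     * attribute is absent the device has no dirty-shutdown tracking.
     */
    int main(void)
    {
        const char *path =
            "/sys/bus/cxl/devices/nvdimm-bridge0/ndbus0/nmem0/cxl/dirty_shutdown";
        char buf[32];
        ssize_t n;
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
            perror(path);
            return 1;
        }
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            printf("dirty shutdown count: %s", buf);
        }
        close(fd);
        return 0;
    }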
diff --git a/Documentation/admin-guide/device-mapper/dm-crypt.rst b/Documentation/admin-guide/device-mapper/dm-crypt.rst
index 9f8139ff97d6..4467f6d4b632 100644
--- a/Documentation/admin-guide/device-mapper/dm-crypt.rst
+++ b/Documentation/admin-guide/device-mapper/dm-crypt.rst
@@ -146,6 +146,11 @@ integrity:<bytes>:<type>
integrity for the encrypted device. The additional space is then
used for storing authentication tag (and persistent IV if needed).
+integrity_key_size:<bytes>
+ Optionally set the integrity key size if it differs from the digest size.
+ It allows the use of wrapped key algorithms where the key size is
+ independent of the cryptographic key size.
+
sector_size:<bytes>
Use <bytes> as the encryption unit instead of 512 bytes sectors.
This option can be in range 512 - 4096 bytes and must be power of two.
diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
index d8a5f14d0e3c..c2e18ecc065c 100644
--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
+++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
@@ -92,6 +92,11 @@ Target arguments:
allowed. This mode is useful for data recovery if the
device cannot be activated in any of the other standard
modes.
+ I - inline mode - in this mode, dm-integrity will store integrity
+ data directly in the underlying device sectors.
+ The underlying device must have an integrity profile that
+ allows storing user integrity data and provides enough
+ space for the selected integrity tag.
5. the number of additional arguments
diff --git a/Documentation/admin-guide/device-mapper/verity.rst b/Documentation/admin-guide/device-mapper/verity.rst
index a65c1602cb23..8c3f1f967a3c 100644
--- a/Documentation/admin-guide/device-mapper/verity.rst
+++ b/Documentation/admin-guide/device-mapper/verity.rst
@@ -87,6 +87,15 @@ panic_on_corruption
Panic the device when a corrupted block is discovered. This option is
not compatible with ignore_corruption and restart_on_corruption.
+restart_on_error
+ Restart the system when an I/O error is detected.
+ This option can be combined with the restart_on_corruption option.
+
+panic_on_error
+ Panic the device when an I/O error is detected. This option is
+ not compatible with the restart_on_error option but can be combined
+ with the panic_on_corruption option.
+
ignore_zero_blocks
Do not verify blocks that are expected to contain zeroes and always return
zeroes instead. This may be useful if the partition contains unused blocks
@@ -142,8 +151,15 @@ root_hash_sig_key_desc <key_description>
already in the secondary trusted keyring.
try_verify_in_tasklet
- If verity hashes are in cache, verify data blocks in kernel tasklet instead
- of workqueue. This option can reduce IO latency.
+ If verity hashes are in cache and the IO size does not exceed the limit,
+ verify data blocks in bottom half instead of workqueue. This option can
+ reduce IO latency. The size limits can be configured via
+ /sys/module/dm_verity/parameters/use_bh_bytes. The four parameters
+ correspond to limits for IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT,
+ IOPRIO_CLASS_BE and IOPRIO_CLASS_IDLE in turn.
+ For example:
+ <none>,<rt>,<be>,<idle>
+ 4096,4096,4096,4096
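Since the limits live in an ordinary module parameter file, they can be applied from userspace once dm_verity is loaded. A minimal C sketch that writes the example values above, assuming the parameter is writable at runtime::

    #include <stdio.h>

    /*
     * Minimal sketch: apply the example limits from the text
     * (4096 bytes for each I/O priority class) to dm-verity's
     * use_bh_bytes module parameter. Requires root and a loaded
     * dm_verity module.
     */
    int main(void)
    {
        const char *param = "/sys/module/dm_verity/parameters/use_bh_bytes";
        const char *limits = "4096,4096,4096,4096\n"; /* <none>,<rt>,<be>,<idle> */
        FILE *f = fopen(param, "w");

        if (!f) {
            perror(param);
            return 1;
        }
        fputs(limits, f);
        fclose(f);
        return 0;
    }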
Theory of operation
===================
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3f35d5b8c296..f5af86b3c4a2 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -7288,6 +7288,8 @@
This is just one of many ways that can clear memory. Make sure your system
keeps the content of memory across reboots before relying on this option.
+ NB: Both the mapped address and size must be page aligned for the architecture.
+
See also Documentation/trace/debugging.rst
diff --git a/Documentation/admin-guide/sysctl/fs.rst b/Documentation/admin-guide/sysctl/fs.rst
index 08e89e031714..6c54718c9d04 100644
--- a/Documentation/admin-guide/sysctl/fs.rst
+++ b/Documentation/admin-guide/sysctl/fs.rst
@@ -347,3 +347,28 @@ filesystems:
``/proc/sys/fs/fuse/max_pages_limit`` is a read/write file for
setting/getting the maximum number of pages that can be used for servicing
requests in FUSE.
+
+``/proc/sys/fs/fuse/default_request_timeout`` is a read/write file for
+setting/getting the default timeout (in seconds) for a fuse server to
+reply to a kernel-issued request in the event where the server did not
+specify a timeout at mount. If the server set a timeout,
+then default_request_timeout will be ignored. The default
+"default_request_timeout" is set to 0. 0 indicates no default timeout.
+The maximum value that can be set is 65535.
+
+``/proc/sys/fs/fuse/max_request_timeout`` is a read/write file for
+setting/getting the maximum timeout (in seconds) for a fuse server to
+reply to a kernel-issued request. A value greater than 0 automatically opts
+the server into a timeout that will be set to at most "max_request_timeout",
+even if the server did not specify a timeout and default_request_timeout is
+set to 0. If max_request_timeout is greater than 0 and the server set a timeout
+greater than max_request_timeout or default_request_timeout is set to a value
+greater than max_request_timeout, the system will use max_request_timeout as the
+timeout. 0 indicates no max request timeout. The maximum value that can be set
+is 65535.
+
+For timeouts, if the server does not respond to the request by the time
+the set timeout elapses, then the connection to the fuse server will be aborted.
+Please note that the timeouts are not 100% precise (eg you may set 60 seconds but
+the timeout may kick in after 70 seconds). The upper margin of error for the
+timeout is roughly FUSE_TIMEOUT_TIMER_FREQ seconds.
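For example, an init script could enforce a default timeout and cap what servers may request by writing to the two files above. A minimal C sketch, using illustrative values within the documented 0-65535 range::

    #include <stdio.h>

    /* Minimal sketch: set the FUSE request-timeout knobs described above. */
    static int write_sysctl(const char *path, unsigned int seconds)
    {
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return -1;
        }
        fprintf(f, "%u\n", seconds);
        return fclose(f);
    }

    int main(void)
    {
        /* 0 would mean "no timeout"; both values must be <= 65535. */
        write_sysctl("/proc/sys/fs/fuse/default_request_timeout", 60);
        write_sysctl("/proc/sys/fs/fuse/max_request_timeout", 600);
        return 0;
    }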
diff --git a/Documentation/arch/powerpc/cxl.rst b/Documentation/arch/powerpc/cxl.rst
deleted file mode 100644
index 778adda740d2..000000000000
--- a/Documentation/arch/powerpc/cxl.rst
+++ /dev/null
@@ -1,470 +0,0 @@
-====================================
-Coherent Accelerator Interface (CXL)
-====================================
-
-Introduction
-============
-
- The coherent accelerator interface is designed to allow the
- coherent connection of accelerators (FPGAs and other devices) to a
- POWER system. These devices need to adhere to the Coherent
- Accelerator Interface Architecture (CAIA).
-
- IBM refers to this as the Coherent Accelerator Processor Interface
- or CAPI. In the kernel it's referred to by the name CXL to avoid
- confusion with the ISDN CAPI subsystem.
-
- Coherent in this context means that the accelerator and CPUs can
- both access system memory directly and with the same effective
- addresses.
-
- **This driver is deprecated and will be removed in a future release.**
-
-Hardware overview
-=================
-
- ::
-
- POWER8/9 FPGA
- +----------+ +---------+
- | | | |
- | CPU | | AFU |
- | | | |
- | | | |
- | | | |
- +----------+ +---------+
- | PHB | | |
- | +------+ | PSL |
- | | CAPP |<------>| |
- +---+------+ PCIE +---------+
-
- The POWER8/9 chip has a Coherently Attached Processor Proxy (CAPP)
- unit which is part of the PCIe Host Bridge (PHB). This is managed
- by Linux by calls into OPAL. Linux doesn't directly program the
- CAPP.
-
- The FPGA (or coherently attached device) consists of two parts.
- The POWER Service Layer (PSL) and the Accelerator Function Unit
- (AFU). The AFU is used to implement specific functionality behind
- the PSL. The PSL, among other things, provides memory address
- translation services to allow each AFU direct access to userspace
- memory.
-
- The AFU is the core part of the accelerator (eg. the compression,
- crypto etc function). The kernel has no knowledge of the function
- of the AFU. Only userspace interacts directly with the AFU.
-
- The PSL provides the translation and interrupt services that the
- AFU needs. This is what the kernel interacts with. For example, if
- the AFU needs to read a particular effective address, it sends
- that address to the PSL, the PSL then translates it, fetches the
- data from memory and returns it to the AFU. If the PSL has a
- translation miss, it interrupts the kernel and the kernel services
- the fault. The context to which this fault is serviced is based on
- who owns that acceleration function.
-
- - POWER8 and PSL Version 8 are compliant to the CAIA Version 1.0.
- - POWER9 and PSL Version 9 are compliant to the CAIA Version 2.0.
-
- This PSL Version 9 provides new features such as:
-
- * Interaction with the nest MMU on the P9 chip.
- * Native DMA support.
- * Supports sending ASB_Notify messages for host thread wakeup.
- * Supports Atomic operations.
- * etc.
-
- Cards with a PSL9 won't work on a POWER8 system and cards with a
- PSL8 won't work on a POWER9 system.
-
-AFU Modes
-=========
-
- There are two programming modes supported by the AFU. Dedicated
- and AFU directed. AFU may support one or both modes.
-
- When using dedicated mode only one MMU context is supported. In
- this mode, only one userspace process can use the accelerator at
- time.
-
- When using AFU directed mode, up to 16K simultaneous contexts can
- be supported. This means up to 16K simultaneous userspace
- applications may use the accelerator (although specific AFUs may
- support fewer). In this mode, the AFU sends a 16 bit context ID
- with each of its requests. This tells the PSL which context is
- associated with each operation. If the PSL can't translate an
- operation, the ID can also be accessed by the kernel so it can
- determine the userspace context associated with an operation.
-
-
-MMIO space
-==========
-
- A portion of the accelerator MMIO space can be directly mapped
- from the AFU to userspace. Either the whole space can be mapped or
- just a per context portion. The hardware is self describing, hence
- the kernel can determine the offset and size of the per context
- portion.
-
-
-Interrupts
-==========
-
- AFUs may generate interrupts that are destined for userspace. These
- are received by the kernel as hardware interrupts and passed onto
- userspace by a read syscall documented below.
-
- Data storage faults and error interrupts are handled by the kernel
- driver.
-
-
-Work Element Descriptor (WED)
-=============================
-
- The WED is a 64-bit parameter passed to the AFU when a context is
- started. Its format is up to the AFU hence the kernel has no
- knowledge of what it represents. Typically it will be the
- effective address of a work queue or status block where the AFU
- and userspace can share control and status information.
-
-
-
-
-User API
-========
-
-1. AFU character devices
-^^^^^^^^^^^^^^^^^^^^^^^^
-
- For AFUs operating in AFU directed mode, two character device
- files will be created. /dev/cxl/afu0.0m will correspond to a
- master context and /dev/cxl/afu0.0s will correspond to a slave
- context. Master contexts have access to the full MMIO space an
- AFU provides. Slave contexts have access to only the per process
- MMIO space an AFU provides.
-
- For AFUs operating in dedicated process mode, the driver will
- only create a single character device per AFU called
- /dev/cxl/afu0.0d. This will have access to the entire MMIO space
- that the AFU provides (like master contexts in AFU directed).
-
- The types described below are defined in include/uapi/misc/cxl.h
-
- The following file operations are supported on both slave and
- master devices.
-
- A userspace library libcxl is available here:
-
- https://github.com/ibm-capi/libcxl
-
- This provides a C interface to this kernel API.
-
-open
-----
-
- Opens the device and allocates a file descriptor to be used with
- the rest of the API.
-
- A dedicated mode AFU only has one context and only allows the
- device to be opened once.
-
-  An AFU directed mode AFU can have many contexts; the device can be
-  opened once for each context that is available.
-
- When all available contexts are allocated the open call will fail
- and return -ENOSPC.
-
- Note:
- IRQs need to be allocated for each context, which may limit
- the number of contexts that can be created, and therefore
- how many times the device can be opened. The POWER8 CAPP
- supports 2040 IRQs and 3 are used by the kernel, so 2037 are
- left. If 1 IRQ is needed per context, then only 2037
- contexts can be allocated. If 4 IRQs are needed per context,
- then only 2037/4 = 509 contexts can be allocated.
-
-
-ioctl
------
-
- CXL_IOCTL_START_WORK:
- Starts the AFU context and associates it with the current
- process. Once this ioctl is successfully executed, all memory
- mapped into this process is accessible to this AFU context
- using the same effective addresses. No additional calls are
- required to map/unmap memory. The AFU memory context will be
- updated as userspace allocates and frees memory. This ioctl
- returns once the AFU context is started.
-
- Takes a pointer to a struct cxl_ioctl_start_work
-
- ::
-
- struct cxl_ioctl_start_work {
- __u64 flags;
- __u64 work_element_descriptor;
- __u64 amr;
- __s16 num_interrupts;
- __s16 reserved1;
- __s32 reserved2;
- __u64 reserved3;
- __u64 reserved4;
- __u64 reserved5;
- __u64 reserved6;
- };
-
- flags:
- Indicates which optional fields in the structure are
- valid.
-
- work_element_descriptor:
- The Work Element Descriptor (WED) is a 64-bit argument
- defined by the AFU. Typically this is an effective
- address pointing to an AFU specific structure
- describing what work to perform.
-
- amr:
- Authority Mask Register (AMR), same as the powerpc
- AMR. This field is only used by the kernel when the
- corresponding CXL_START_WORK_AMR value is specified in
- flags. If not specified the kernel will use a default
- value of 0.
-
- num_interrupts:
- Number of userspace interrupts to request. This field
- is only used by the kernel when the corresponding
- CXL_START_WORK_NUM_IRQS value is specified in flags.
- If not specified the minimum number required by the
- AFU will be allocated. The min and max number can be
- obtained from sysfs.
-
- reserved fields:
- For ABI padding and future extensions
-
- CXL_IOCTL_GET_PROCESS_ELEMENT:
- Get the current context id, also known as the process element.
- The value is returned from the kernel as a __u32.
-
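A minimal sketch of the open() + CXL_IOCTL_START_WORK flow described above,
for an AFU in dedicated mode. The device path, the page used as the WED and
the lack of error cleanup are illustrative assumptions, not part of the ABI;
only the ioctl name and struct come from include/uapi/misc/cxl.h::

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/cxl.h>   /* installed from include/uapi/misc/cxl.h */

    /* Page shared with the AFU; the WED is assumed to point at it. */
    static char wed_page[4096] __attribute__((aligned(4096)));

    static int start_afu_context(void)
    {
        struct cxl_ioctl_start_work work;
        int fd = open("/dev/cxl/afu0.0d", O_RDWR);   /* example device */

        if (fd < 0)
            return -1;

        memset(&work, 0, sizeof(work));
        work.work_element_descriptor = (__u64)(unsigned long)wed_page;
        /* flags left at 0: the kernel uses a default AMR and allocates
         * the minimum number of interrupts required by the AFU. */

        if (ioctl(fd, CXL_IOCTL_START_WORK, &work) < 0)
            return -1;

        return fd;   /* the context is now attached to this process */
    }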
-
-mmap
-----
-
- An AFU may have an MMIO space to facilitate communication with the
- AFU. If it does, the MMIO space can be accessed via mmap. The size
- and contents of this area are specific to the particular AFU. The
- size can be discovered via sysfs.
-
- In AFU directed mode, master contexts are allowed to map all of
- the MMIO space and slave contexts are allowed to only map the per
- process MMIO space associated with the context. In dedicated
- process mode the entire MMIO space can always be mapped.
-
- This mmap call must be done after the START_WORK ioctl.
-
-  Care should be taken when accessing MMIO space. Only 32-bit and 64-bit
-  accesses are supported by POWER8. Also, the AFU will be designed with a
-  specific endianness, so all MMIO accesses should consider endianness (the
-  endian(3) helpers such as le64toh() and be64toh() are recommended). These
-  endian issues equally apply to shared memory queues the WED may describe.
-
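A sketch of mapping and accessing the per-context MMIO area with the endian
helpers recommended above. The MMIO size and the register offsets are
assumptions; in practice the size comes from sysfs and the offsets from the
AFU's own documentation::

    #include <endian.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* mmio_size would normally be read from sysfs. */
    static volatile uint64_t *afu_map_mmio(int fd, size_t mmio_size)
    {
        void *p = mmap(NULL, mmio_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);

        return (p == MAP_FAILED) ? NULL : (volatile uint64_t *)p;
    }

    /* 64-bit accesses only, converting from an (assumed) little-endian
     * AFU register layout to host byte order. */
    static uint64_t afu_read64(volatile uint64_t *mmio, size_t off)
    {
        return le64toh(mmio[off / 8]);
    }

    static void afu_write64(volatile uint64_t *mmio, size_t off, uint64_t val)
    {
        mmio[off / 8] = htole64(val);
    }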
-
-read
-----
-
- Reads events from the AFU. Blocks if no events are pending
- (unless O_NONBLOCK is supplied). Returns -EIO in the case of an
- unrecoverable error or if the card is removed.
-
- read() will always return an integral number of events.
-
- The buffer passed to read() must be at least 4K bytes.
-
-  The result of the read will be a buffer of one or more events;
-  each event is of type struct cxl_event, of varying size::
-
- struct cxl_event {
- struct cxl_event_header header;
- union {
- struct cxl_event_afu_interrupt irq;
- struct cxl_event_data_storage fault;
- struct cxl_event_afu_error afu_error;
- };
- };
-
- The struct cxl_event_header is defined as
-
- ::
-
- struct cxl_event_header {
- __u16 type;
- __u16 size;
- __u16 process_element;
- __u16 reserved1;
- };
-
- type:
- This defines the type of event. The type determines how
- the rest of the event is structured. These types are
- described below and defined by enum cxl_event_type.
-
- size:
- This is the size of the event in bytes including the
- struct cxl_event_header. The start of the next event can
- be found at this offset from the start of the current
- event.
-
- process_element:
- Context ID of the event.
-
- reserved field:
- For future extensions and padding.
-
- If the event type is CXL_EVENT_AFU_INTERRUPT then the event
- structure is defined as
-
- ::
-
- struct cxl_event_afu_interrupt {
- __u16 flags;
- __u16 irq; /* Raised AFU interrupt number */
- __u32 reserved1;
- };
-
- flags:
- These flags indicate which optional fields are present
- in this struct. Currently all fields are mandatory.
-
- irq:
- The IRQ number sent by the AFU.
-
- reserved field:
- For future extensions and padding.
-
- If the event type is CXL_EVENT_DATA_STORAGE then the event
- structure is defined as
-
- ::
-
- struct cxl_event_data_storage {
- __u16 flags;
- __u16 reserved1;
- __u32 reserved2;
- __u64 addr;
- __u64 dsisr;
- __u64 reserved3;
- };
-
- flags:
- These flags indicate which optional fields are present in
- this struct. Currently all fields are mandatory.
-
-    addr:
- The address that the AFU unsuccessfully attempted to
- access. Valid accesses will be handled transparently by the
- kernel but invalid accesses will generate this event.
-
- dsisr:
- This field gives information on the type of fault. It is a
- copy of the DSISR from the PSL hardware when the address
- fault occurred. The form of the DSISR is as defined in the
- CAIA.
-
- reserved fields:
- For future extensions
-
- If the event type is CXL_EVENT_AFU_ERROR then the event structure
- is defined as
-
- ::
-
- struct cxl_event_afu_error {
- __u16 flags;
- __u16 reserved1;
- __u32 reserved2;
- __u64 error;
- };
-
- flags:
- These flags indicate which optional fields are present in
-        this struct. Currently all fields are mandatory.
-
- error:
- Error status from the AFU. Defined by the AFU.
-
- reserved fields:
- For future extensions and padding
-
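A sketch of draining and decoding events with read(), following the layout
described above. The 4 KB buffer satisfies the minimum size requirement; the
printf() reporting is purely illustrative::

    #include <stdio.h>
    #include <unistd.h>
    #include <misc/cxl.h>

    static void drain_afu_events(int fd)
    {
        /* read() needs at least a 4K buffer and returns whole events */
        char buf[4096] __attribute__((aligned(8)));
        ssize_t n = read(fd, buf, sizeof(buf));
        ssize_t off = 0;

        while (n > 0 && off < n) {
            struct cxl_event *ev = (struct cxl_event *)(buf + off);

            switch (ev->header.type) {
            case CXL_EVENT_AFU_INTERRUPT:
                printf("AFU interrupt %u\n", (unsigned)ev->irq.irq);
                break;
            case CXL_EVENT_DATA_STORAGE:
                printf("translation fault at 0x%llx\n",
                       (unsigned long long)ev->fault.addr);
                break;
            case CXL_EVENT_AFU_ERROR:
                printf("AFU error 0x%llx\n",
                       (unsigned long long)ev->afu_error.error);
                break;
            }

            /* header.size gives the offset of the next event */
            off += ev->header.size;
        }
    }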
-
-2. Card character device (powerVM guest only)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
- In a powerVM guest, an extra character device is created for the
- card. The device is only used to write (flash) a new image on the
- FPGA accelerator. Once the image is written and verified, the
- device tree is updated and the card is reset to reload the updated
- image.
-
-open
-----
-
- Opens the device and allocates a file descriptor to be used with
- the rest of the API. The device can only be opened once.
-
-ioctl
------
-
-CXL_IOCTL_DOWNLOAD_IMAGE / CXL_IOCTL_VALIDATE_IMAGE:
- Starts and controls flashing a new FPGA image. Partial
- reconfiguration is not supported (yet), so the image must contain
- a copy of the PSL and AFU(s). Since an image can be quite large,
-    the caller may have to iterate, splitting the image into smaller
- chunks.
-
- Takes a pointer to a struct cxl_adapter_image::
-
- struct cxl_adapter_image {
- __u64 flags;
- __u64 data;
- __u64 len_data;
- __u64 len_image;
- __u64 reserved1;
- __u64 reserved2;
- __u64 reserved3;
- __u64 reserved4;
- };
-
- flags:
- These flags indicate which optional fields are present in
- this struct. Currently all fields are mandatory.
-
- data:
- Pointer to a buffer with part of the image to write to the
- card.
-
- len_data:
- Size of the buffer pointed to by data.
-
- len_image:
- Full size of the image.
-
-
-Sysfs Class
-===========
-
- A cxl sysfs class is added under /sys/class/cxl to facilitate
- enumeration and tuning of the accelerators. Its layout is
- described in Documentation/ABI/obsolete/sysfs-class-cxl
-
-
-Udev rules
-==========
-
- The following udev rules could be used to create a symlink to the
- most logical chardev to use in any programming mode (afuX.Yd for
- dedicated, afuX.Ys for afu directed), since the API is virtually
- identical for each::
-
- SUBSYSTEM=="cxl", ATTRS{mode}=="dedicated_process", SYMLINK="cxl/%b"
- SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", \
- KERNEL=="afu[0-9]*.[0-9]*s", SYMLINK="cxl/%b"
diff --git a/Documentation/arch/powerpc/index.rst b/Documentation/arch/powerpc/index.rst
index 995268530f21..0560cbae5fa1 100644
--- a/Documentation/arch/powerpc/index.rst
+++ b/Documentation/arch/powerpc/index.rst
@@ -12,7 +12,6 @@ powerpc
bootwrapper
cpu_families
cpu_features
- cxl
dawr-power9
dexcr
dscr
diff --git a/Documentation/block/ublk.rst b/Documentation/block/ublk.rst
index 1e0e7358e14a..854f823b46c2 100644
--- a/Documentation/block/ublk.rst
+++ b/Documentation/block/ublk.rst
@@ -309,18 +309,35 @@ with specified IO tag in the command data:
``UBLK_IO_COMMIT_AND_FETCH_REQ`` to the server, ublkdrv needs to copy
the server buffer (pages) read to the IO request pages.
-Future development
-==================
-
Zero copy
---------
-Zero copy is a generic requirement for nbd, fuse or similar drivers. A
-problem [#xiaoguang]_ Xiaoguang mentioned is that pages mapped to userspace
-can't be remapped any more in kernel with existing mm interfaces. This can
-occurs when destining direct IO to ``/dev/ublkb*``. Also, he reported that
-big requests (IO size >= 256 KB) may benefit a lot from zero copy.
-
+ublk zero copy relies on io_uring's fixed kernel buffer, which provides
+two APIs: `io_buffer_register_bvec()` and `io_buffer_unregister_bvec()`.
+
+ublk adds the IO command `UBLK_IO_REGISTER_IO_BUF`, which calls
+`io_buffer_register_bvec()` so that the ublk server can register a client
+request buffer in the io_uring buffer table; the server can then submit
+io_uring IOs that reference the registered buffer index. The IO command
+`UBLK_IO_UNREGISTER_IO_BUF` calls `io_buffer_unregister_bvec()` to
+unregister the buffer, which is guaranteed to stay live between the calls to
+`io_buffer_register_bvec()` and `io_buffer_unregister_bvec()`. Any io_uring
+operation which supports this kind of kernel buffer holds a reference on the
+buffer until the operation completes.
+
+A ublk server implementing zero copy or user copy has to have CAP_SYS_ADMIN
+and be trusted, because it is the ublk server's responsibility to fill the
+IO buffer with data when handling a READ command and to return a result to
+the ublk driver that matches how many bytes were actually filled into the IO
+buffer. Otherwise, an uninitialized kernel IO buffer may be exposed to the
+client application.
+
+The ublk server needs to align the `struct ublk_param_dma_align` parameter
+with its backend for zero copy to work correctly.
+
+For best IO performance, the ublk server should also align its
+`struct ublk_param_segment` parameter with its backend, avoiding unnecessary
+IO splitting, which usually hurts io_uring performance.
References
==========
@@ -332,5 +349,3 @@ References
.. [#userspace_nbdublk] https://gitlab.com/rwmjones/libnbd/-/tree/nbdublk
.. [#userspace_readme] https://github.com/ming1/ubdsrv/blob/master/README
-
-.. [#xiaoguang] https://lore.kernel.org/linux-block/YoOr6jBfgVm8GvWg@stefanha-x1.localdomain/
diff --git a/Documentation/devicetree/bindings/mfd/aspeed-lpc.yaml b/Documentation/devicetree/bindings/mfd/aspeed-lpc.yaml
index 5dfe77aca167..d88854e60b7f 100644
--- a/Documentation/devicetree/bindings/mfd/aspeed-lpc.yaml
+++ b/Documentation/devicetree/bindings/mfd/aspeed-lpc.yaml
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-# # Copyright (c) 2021 Aspeed Tehchnology Inc.
+# # Copyright (c) 2021 Aspeed Technology Inc.
%YAML 1.2
---
$id: http://devicetree.org/schemas/mfd/aspeed-lpc.yaml#
diff --git a/Documentation/devicetree/bindings/rtc/adi,max31335.yaml b/Documentation/devicetree/bindings/rtc/adi,max31335.yaml
index 0125cf6727cc..bce7558d0d87 100644
--- a/Documentation/devicetree/bindings/rtc/adi,max31335.yaml
+++ b/Documentation/devicetree/bindings/rtc/adi,max31335.yaml
@@ -18,7 +18,9 @@ allOf:
properties:
compatible:
- const: adi,max31335
+ enum:
+ - adi,max31331
+ - adi,max31335
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
index 2d9fe5a75b06..11fcf0ca1ae0 100644
--- a/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
@@ -8,6 +8,7 @@ title: NXP PCF2127 Real Time Clock
allOf:
- $ref: rtc.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
maintainers:
- Alexandre Belloni <alexandre.belloni@bootlin.com>
@@ -34,7 +35,7 @@ required:
- compatible
- reg
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
index d274bb7a534b..68ef3208c886 100644
--- a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
@@ -50,6 +50,11 @@ properties:
items:
- const: offset
+ qcom,no-alarm:
+ type: boolean
+ description:
+ RTC alarm is not owned by the OS
+
wakeup-source: true
required:
diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
index 0bde2379e864..dc0d52920575 100644
--- a/Documentation/devicetree/bindings/serial/8250.yaml
+++ b/Documentation/devicetree/bindings/serial/8250.yaml
@@ -77,7 +77,6 @@ properties:
- altr,16550-FIFO64
- altr,16550-FIFO128
- fsl,16550-FIFO64
- - fsl,ns16550
- andestech,uart16550
- nxp,lpc1850-uart
- opencores,uart16550-rtlsvn105
@@ -86,6 +85,7 @@ properties:
- items:
- enum:
- ns16750
+ - fsl,ns16550
- cavium,octeon-3860-uart
- xlnx,xps-uart16550-2.00.b
- ralink,rt2880-uart
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
index 3f9ace89dee9..c42261b5a80a 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
@@ -30,6 +30,7 @@ properties:
- items:
- enum:
- fsl,imx93-lpuart
+ - fsl,imx94-lpuart
- fsl,imx95-lpuart
- const: fsl,imx8ulp-lpuart
- const: fsl,imx7ulp-lpuart
diff --git a/Documentation/devicetree/bindings/serial/nvidia,tegra264-utc.yaml b/Documentation/devicetree/bindings/serial/nvidia,tegra264-utc.yaml
new file mode 100644
index 000000000000..572cc574da64
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/nvidia,tegra264-utc.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/nvidia,tegra264-utc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra UTC (UART Trace Controller) client
+
+maintainers:
+ - Kartik Rajput <kkartik@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jonathan Hunter <jonathanh@nvidia.com>
+
+description:
+ Represents a client interface of the Tegra UTC (UART Trace Controller). The
+ Tegra UTC allows multiple clients within the Tegra SoC to share a physical
+ UART interface. It supports up to 16 clients. Each client operates as an
+ independent UART endpoint with a dedicated interrupt and 128-character TX/RX
+ FIFOs.
+
+  The Tegra UTC clients use an 8-N-1 configuration and operate at a baud
+  rate configured by the bootloader at the controller level.
+
+allOf:
+ - $ref: serial.yaml#
+
+properties:
+ compatible:
+ const: nvidia,tegra264-utc
+
+ reg:
+ items:
+ - description: TX region.
+ - description: RX region.
+
+ reg-names:
+ items:
+ - const: tx
+ - const: rx
+
+ interrupts:
+ maxItems: 1
+
+ tx-threshold:
+ minimum: 1
+ maximum: 128
+
+ rx-threshold:
+ minimum: 1
+ maximum: 128
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - tx-threshold
+ - rx-threshold
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ tegra_utc: serial@c4e0000 {
+ compatible = "nvidia,tegra264-utc";
+ reg = <0xc4e0000 0x8000>, <0xc4e8000 0x8000>;
+ reg-names = "tx", "rx";
+ interrupts = <GIC_SPI 514 IRQ_TYPE_LEVEL_HIGH>;
+ tx-threshold = <4>;
+ rx-threshold = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/pl011.yaml b/Documentation/devicetree/bindings/serial/pl011.yaml
index 9571041030b7..3fcf2d042372 100644
--- a/Documentation/devicetree/bindings/serial/pl011.yaml
+++ b/Documentation/devicetree/bindings/serial/pl011.yaml
@@ -92,6 +92,9 @@ properties:
3000ms.
default: 3000
+ power-domains:
+ maxItems: 1
+
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.yaml b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
index 070eba9f19d3..83d9986d8e98 100644
--- a/Documentation/devicetree/bindings/serial/samsung_uart.yaml
+++ b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
@@ -42,6 +42,10 @@ properties:
- samsung,exynosautov9-uart
- samsung,exynosautov920-uart
- const: samsung,exynos850-uart
+ - items:
+ - enum:
+ - samsung,exynos7870-uart
+ - const: samsung,exynos8895-uart
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
index 1c163cb5dff1..1aa3480d8d81 100644
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
@@ -16,6 +16,20 @@ allOf:
- if:
properties:
compatible:
+ items:
+ - enum:
+ - renesas,r9a06g032-uart
+ - renesas,r9a06g033-uart
+ - const: renesas,rzn1-uart
+ - const: snps,dw-apb-uart
+ then:
+ properties:
+ dmas: false
+ dma-names: false
+
+ - if:
+ properties:
+ compatible:
contains:
const: starfive,jh7110-uart
then:
@@ -35,6 +49,12 @@ properties:
- renesas,r9a06g032-uart
- renesas,r9a06g033-uart
- const: renesas,rzn1-uart
+ - const: snps,dw-apb-uart
+ - items:
+ - enum:
+ - renesas,r9a06g032-uart
+ - renesas,r9a06g033-uart
+ - const: renesas,rzn1-uart
- items:
- enum:
- brcm,bcm11351-dw-apb-uart
@@ -51,6 +71,7 @@ properties:
- rockchip,rk3368-uart
- rockchip,rk3399-uart
- rockchip,rk3528-uart
+ - rockchip,rk3562-uart
- rockchip,rk3568-uart
- rockchip,rk3576-uart
- rockchip,rk3588-uart
diff --git a/Documentation/devicetree/bindings/serial/sprd-uart.yaml b/Documentation/devicetree/bindings/serial/sprd-uart.yaml
index a2a5056eba04..5bf2656afcfd 100644
--- a/Documentation/devicetree/bindings/serial/sprd-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/sprd-uart.yaml
@@ -17,13 +17,18 @@ properties:
oneOf:
- items:
- enum:
- - sprd,sc9632-uart
+ - sprd,ums9632-uart
+ - const: sprd,sc9632-uart
+ - items:
+ - enum:
- sprd,sc9860-uart
- sprd,sc9863a-uart
- sprd,ums512-uart
- sprd,ums9620-uart
- const: sprd,sc9836-uart
- - const: sprd,sc9836-uart
+ - enum:
+ - sprd,sc9632-uart
+ - sprd,sc9836-uart
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/usb/generic-xhci.yaml b/Documentation/devicetree/bindings/usb/generic-xhci.yaml
index 6ceafa4af292..a2b94a138999 100644
--- a/Documentation/devicetree/bindings/usb/generic-xhci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-xhci.yaml
@@ -51,6 +51,8 @@ properties:
- const: core
- const: reg
+ dma-coherent: true
+
power-domains:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml b/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
index b14e6f37b298..4e3901efed3f 100644
--- a/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
+++ b/Documentation/devicetree/bindings/usb/microchip,usb2514.yaml
@@ -9,16 +9,19 @@ title: Microchip USB2514 Hub Controller
maintainers:
- Fabio Estevam <festevam@gmail.com>
-allOf:
- - $ref: usb-device.yaml#
-
properties:
compatible:
- enum:
- - usb424,2412
- - usb424,2417
- - usb424,2514
- - usb424,2517
+ oneOf:
+ - enum:
+ - usb424,2412
+ - usb424,2417
+ - usb424,2514
+ - usb424,2517
+ - items:
+ - enum:
+ - usb424,2512
+ - usb424,2513
+ - const: usb424,2514
reg: true
@@ -28,6 +31,9 @@ properties:
vdd-supply:
description: 3.3V power supply.
+ vdda-supply:
+ description: 3.3V analog power supply.
+
clocks:
description: External 24MHz clock connected to the CLKIN pin.
maxItems: 1
@@ -43,6 +49,18 @@ patternProperties:
$ref: /schemas/usb/usb-device.yaml
additionalProperties: true
+allOf:
+ - $ref: usb-device.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: usb424,2514
+ then:
+ properties:
+ vdda-supply: false
+
unevaluatedProperties: false
examples:
@@ -60,6 +78,7 @@ examples:
clocks = <&clks IMX6QDL_CLK_CKO>;
reset-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
vdd-supply = <&reg_3v3_hub>;
+ vdda-supply = <&reg_3v3a_hub>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/usb/parade,ps8830.yaml b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
new file mode 100644
index 000000000000..935d57f5d26f
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/parade,ps8830.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Parade PS883x USB and DisplayPort Retimer
+
+maintainers:
+ - Abel Vesa <abel.vesa@linaro.org>
+
+properties:
+ compatible:
+ enum:
+ - parade,ps8830
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: XO Clock
+
+ reset-gpios:
+ maxItems: 1
+
+ vdd-supply:
+ description: power supply (1.07V)
+
+ vdd33-supply:
+ description: power supply (3.3V)
+
+ vdd33-cap-supply:
+ description: power supply (3.3V)
+
+ vddar-supply:
+ description: power supply (1.07V)
+
+ vddat-supply:
+ description: power supply (1.07V)
+
+ vddio-supply:
+ description: power supply (1.2V or 1.8V)
+
+ orientation-switch: true
+ retimer-switch: true
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Super Speed (SS) Output endpoint to the Type-C connector
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ description: Super Speed (SS) Input endpoint from the Super-Speed PHY
+ unevaluatedProperties: false
+
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Sideband Use (SBU) AUX lines endpoint to the Type-C connector for the purpose of
+ handling altmode muxing and orientation switching.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - reset-gpios
+ - vdd-supply
+ - vdd33-supply
+ - vdd33-cap-supply
+ - vddat-supply
+ - vddio-supply
+ - orientation-switch
+ - retimer-switch
+
+allOf:
+ - $ref: usb-switch.yaml#
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ typec-mux@8 {
+ compatible = "parade,ps8830";
+ reg = <0x8>;
+
+ clocks = <&clk_rtmr_xo>;
+
+ vdd-supply = <&vreg_rtmr_1p15>;
+ vdd33-supply = <&vreg_rtmr_3p3>;
+ vdd33-cap-supply = <&vreg_rtmr_3p3>;
+ vddar-supply = <&vreg_rtmr_1p15>;
+ vddat-supply = <&vreg_rtmr_1p15>;
+ vddio-supply = <&vreg_rtmr_1p8>;
+
+ reset-gpios = <&tlmm 10 GPIO_ACTIVE_LOW>;
+
+ retimer-switch;
+ orientation-switch;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ endpoint {
+ remote-endpoint = <&typec_con_ss>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ endpoint {
+ remote-endpoint = <&usb_phy_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ endpoint {
+ remote-endpoint = <&typec_dp_aux>;
+ };
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index a2b3cf625e5b..64137c1619a6 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -404,6 +404,7 @@ allOf:
minItems: 2
maxItems: 3
interrupt-names:
+ minItems: 2
items:
- const: pwr_event
- const: qusb2_phy
@@ -425,6 +426,7 @@ allOf:
minItems: 3
maxItems: 4
interrupt-names:
+ minItems: 3
items:
- const: pwr_event
- const: qusb2_phy
diff --git a/Documentation/devicetree/bindings/usb/richtek,rt1711h.yaml b/Documentation/devicetree/bindings/usb/richtek,rt1711h.yaml
index 8da4d2ad1a91..ae611f7e57ca 100644
--- a/Documentation/devicetree/bindings/usb/richtek,rt1711h.yaml
+++ b/Documentation/devicetree/bindings/usb/richtek,rt1711h.yaml
@@ -30,6 +30,9 @@ properties:
interrupts:
maxItems: 1
+ vbus-supply:
+ description: VBUS power supply
+
wakeup-source:
type: boolean
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml b/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
index a21cc098542d..fba2cb05ecba 100644
--- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.yaml
@@ -26,6 +26,7 @@ select:
contains:
enum:
- rockchip,rk3328-dwc3
+ - rockchip,rk3562-dwc3
- rockchip,rk3568-dwc3
- rockchip,rk3576-dwc3
- rockchip,rk3588-dwc3
@@ -37,6 +38,7 @@ properties:
items:
- enum:
- rockchip,rk3328-dwc3
+ - rockchip,rk3562-dwc3
- rockchip,rk3568-dwc3
- rockchip,rk3576-dwc3
- rockchip,rk3588-dwc3
@@ -72,6 +74,7 @@ properties:
- enum:
- grf_clk
- utmi
+ - pipe
- const: pipe
power-domains:
@@ -115,6 +118,22 @@ allOf:
properties:
compatible:
contains:
+ const: rockchip,rk3562-dwc3
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: ref_clk
+ - const: suspend_clk
+ - const: bus_clk
+ - const: pipe
+ - if:
+ properties:
+ compatible:
+ contains:
enum:
- rockchip,rk3568-dwc3
- rockchip,rk3576-dwc3
diff --git a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
index 2b3430cebe99..256bee2a03ca 100644
--- a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
@@ -11,12 +11,17 @@ maintainers:
properties:
compatible:
- enum:
- - google,gs101-dwusb3
- - samsung,exynos5250-dwusb3
- - samsung,exynos5433-dwusb3
- - samsung,exynos7-dwusb3
- - samsung,exynos850-dwusb3
+ oneOf:
+ - enum:
+ - google,gs101-dwusb3
+ - samsung,exynos5250-dwusb3
+ - samsung,exynos5433-dwusb3
+ - samsung,exynos7-dwusb3
+ - samsung,exynos7870-dwusb3
+ - samsung,exynos850-dwusb3
+ - items:
+ - const: samsung,exynos990-dwusb3
+ - const: samsung,exynos850-dwusb3
'#address-cells':
const: 1
@@ -52,7 +57,6 @@ required:
- clock-names
- ranges
- '#size-cells'
- - vdd10-supply
- vdd33-supply
allOf:
@@ -72,6 +76,8 @@ allOf:
- const: susp_clk
- const: link_aclk
- const: link_pclk
+ required:
+ - vdd10-supply
- if:
properties:
@@ -86,6 +92,8 @@ allOf:
clock-names:
items:
- const: usbdrd30
+ required:
+ - vdd10-supply
- if:
properties:
@@ -103,6 +111,8 @@ allOf:
- const: susp_clk
- const: phyclk
- const: pipe_pclk
+ required:
+ - vdd10-supply
- if:
properties:
@@ -119,6 +129,24 @@ allOf:
- const: usbdrd30
- const: usbdrd30_susp_clk
- const: usbdrd30_axius_clk
+ required:
+ - vdd10-supply
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7870-dwusb3
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: bus_early
+ - const: ref
+ - const: ctrl
- if:
properties:
@@ -134,6 +162,8 @@ allOf:
items:
- const: bus_early
- const: ref
+ required:
+ - vdd10-supply
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml
index c956053fd036..71249b6ba616 100644
--- a/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml
+++ b/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml
@@ -65,6 +65,17 @@ properties:
mode.
type: boolean
+ snps,reserved-endpoints:
+ description:
+      Reserve endpoints for other needs, e.g., for tracing control and output.
+      When set, the driver will avoid using them for regular USB transfers.
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 1
+ maxItems: 30
+ items:
+ minimum: 2
+ maximum: 31
+
snps,dis-start-transfer-quirk:
description:
When set, disable isoc START TRANSFER command failure SW work-around
diff --git a/Documentation/devicetree/bindings/usb/usb-device.yaml b/Documentation/devicetree/bindings/usb/usb-device.yaml
index da890ee60ce6..c67695681033 100644
--- a/Documentation/devicetree/bindings/usb/usb-device.yaml
+++ b/Documentation/devicetree/bindings/usb/usb-device.yaml
@@ -39,8 +39,10 @@ properties:
reg:
description: the number of the USB hub port or the USB host-controller
- port to which this device is attached. The range is 1-255.
- maxItems: 1
+ port to which this device is attached.
+ items:
+ - minimum: 1
+ maximum: 255
"#address-cells":
description: should be 1 for hub nodes with device nodes,
diff --git a/Documentation/driver-api/cxl/maturity-map.rst b/Documentation/driver-api/cxl/maturity-map.rst
index df8e2ac2a320..a2288f9df658 100644
--- a/Documentation/driver-api/cxl/maturity-map.rst
+++ b/Documentation/driver-api/cxl/maturity-map.rst
@@ -130,7 +130,7 @@ Mailbox commands
* [0] Switch CCI
* [3] Timestamp
* [1] PMEM labels
-* [0] PMEM GPF / Dirty Shutdown
+* [3] PMEM GPF / Dirty Shutdown
* [0] Scan Media
PMU
diff --git a/Documentation/driver-api/serial/driver.rst b/Documentation/driver-api/serial/driver.rst
index 84b43061c11b..fa1ebfcd4472 100644
--- a/Documentation/driver-api/serial/driver.rst
+++ b/Documentation/driver-api/serial/driver.rst
@@ -101,6 +101,6 @@ Modem control lines via GPIO
Some helpers are provided in order to set/get modem control lines via GPIO.
.. kernel-doc:: drivers/tty/serial/serial_mctrl_gpio.c
- :identifiers: mctrl_gpio_init mctrl_gpio_free mctrl_gpio_to_gpiod
+ :identifiers: mctrl_gpio_init mctrl_gpio_to_gpiod
mctrl_gpio_set mctrl_gpio_get mctrl_gpio_enable_ms
- mctrl_gpio_disable_ms
+ mctrl_gpio_disable_ms_sync mctrl_gpio_disable_ms_no_sync
diff --git a/Documentation/driver-api/tty/tty_driver.rst b/Documentation/driver-api/tty/tty_driver.rst
index cc529f863406..7138222a70f2 100644
--- a/Documentation/driver-api/tty/tty_driver.rst
+++ b/Documentation/driver-api/tty/tty_driver.rst
@@ -25,6 +25,8 @@ freed.
For reference, both allocation and deallocation functions are explained here in
detail:
+.. kernel-doc:: include/linux/tty_driver.h
+ :identifiers: tty_alloc_driver
.. kernel-doc:: drivers/tty/tty_io.c
:identifiers: __tty_alloc_driver tty_driver_kref_put
@@ -35,7 +37,7 @@ Here comes the documentation of flags accepted by tty_alloc_driver() (or
__tty_alloc_driver()):
.. kernel-doc:: include/linux/tty_driver.h
- :doc: TTY Driver Flags
+ :identifiers: tty_driver_flag
----
diff --git a/Documentation/driver-api/tty/tty_struct.rst b/Documentation/driver-api/tty/tty_struct.rst
index c72f5a4293b2..29caf1c1ca5f 100644
--- a/Documentation/driver-api/tty/tty_struct.rst
+++ b/Documentation/driver-api/tty/tty_struct.rst
@@ -72,7 +72,7 @@ TTY Struct Flags
================
.. kernel-doc:: include/linux/tty.h
- :doc: TTY Struct Flags
+ :identifiers: tty_struct_flags
TTY Struct Reference
====================
diff --git a/Documentation/driver-api/usb/writing_musb_glue_layer.rst b/Documentation/driver-api/usb/writing_musb_glue_layer.rst
index e755c8551bba..0bb96ecdf527 100644
--- a/Documentation/driver-api/usb/writing_musb_glue_layer.rst
+++ b/Documentation/driver-api/usb/writing_musb_glue_layer.rst
@@ -613,7 +613,7 @@ endpoints configuration from the hardware, so we use line 12 instruction
to bypass reading the configuration from silicon, and rely on a
hard-coded table that describes the endpoints configuration instead::
- static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
+ static const struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
diff --git a/Documentation/features/core/mseal_sys_mappings/arch-support.txt b/Documentation/features/core/mseal_sys_mappings/arch-support.txt
new file mode 100644
index 000000000000..c6cab9760d57
--- /dev/null
+++ b/Documentation/features/core/mseal_sys_mappings/arch-support.txt
@@ -0,0 +1,30 @@
+#
+# Feature name: mseal-system-mappings
+# Kconfig: ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
+# description: arch supports mseal system mappings
+#
+ -----------------------
+ | arch |status|
+ -----------------------
+ | alpha: | TODO |
+ | arc: | N/A |
+ | arm: | N/A |
+ | arm64: | ok |
+ | csky: | N/A |
+ | hexagon: | N/A |
+ | loongarch: | TODO |
+ | m68k: | N/A |
+ | microblaze: | N/A |
+ | mips: | TODO |
+ | nios2: | N/A |
+ | openrisc: | N/A |
+ | parisc: | TODO |
+ | powerpc: | TODO |
+ | riscv: | TODO |
+ | s390: | ok |
+ | sh: | N/A |
+ | sparc: | TODO |
+ | um: | TODO |
+ | x86: | ok |
+ | xtensa: | N/A |
+ -----------------------
diff --git a/Documentation/filesystems/9p.rst b/Documentation/filesystems/9p.rst
index 3078f3c9256a..be3504ca034a 100644
--- a/Documentation/filesystems/9p.rst
+++ b/Documentation/filesystems/9p.rst
@@ -40,7 +40,7 @@ For remote file server::
mount -t 9p 10.10.1.2 /mnt/9
-For Plan 9 From User Space applications (http://swtch.com/plan9)::
+For Plan 9 From User Space applications (https://9fans.github.io/plan9port/)::
mount -t 9p `namespace`/acme /mnt/9 -o trans=unix,uname=$USER
@@ -165,8 +165,8 @@ Options
do not necessarily validate cached values on the server. In other
words changes on the server are not guaranteed to be reflected
on the client system. Only use this mode of operation if you
- have an exclusive mount and the server will modify the filesystem
- underneath you.
+ have an exclusive mount and the server will not modify the
+ filesystem underneath you.
debug=n specifies debug level. The debug level is a bitmask.
diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst
index 54be7ddf3e57..6e6a515d0899 100644
--- a/Documentation/rust/arch-support.rst
+++ b/Documentation/rust/arch-support.rst
@@ -15,6 +15,7 @@ support corresponds to ``S`` values in the ``MAINTAINERS`` file.
============= ================ ==============================================
Architecture Level of support Constraints
============= ================ ==============================================
+``arm`` Maintained ARMv7 Little Endian only.
``arm64`` Maintained Little Endian only.
``loongarch`` Maintained \-
``riscv`` Maintained ``riscv64`` and LLVM/Clang only.
diff --git a/Documentation/trace/debugging.rst b/Documentation/trace/debugging.rst
index 54fb16239d70..d54bc500af80 100644
--- a/Documentation/trace/debugging.rst
+++ b/Documentation/trace/debugging.rst
@@ -136,6 +136,8 @@ kernel, so only the same kernel is guaranteed to work if the mapping is
preserved. Switching to a different kernel version may find a different
layout and mark the buffer as invalid.
+NB: Both the mapped address and size must be page aligned for the architecture.
+
Using trace_printk() in the boot instance
-----------------------------------------
By default, the content of trace_printk() goes into the top level tracing
diff --git a/Documentation/usb/CREDITS b/Documentation/usb/CREDITS
index 81ea3eb29e96..ce6450a6ed7c 100644
--- a/Documentation/usb/CREDITS
+++ b/Documentation/usb/CREDITS
@@ -161,7 +161,7 @@ THANKS file in Inaky's driver):
- The people at the linux-usb mailing list, for reading so
many messages :) Ok, no more kidding; for all your advises!
- - All the people at the USB Implementors Forum for their
+ - All the people at the USB Implementers Forum for their
help and assistance.
- Nathan Myers <ncm@cantrip.org>, for his advice! (hope you
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 3d1cd7ad9d67..7a1409ecc238 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -376,9 +376,9 @@ Code Seq# Include File Comments
0xB8 all uapi/linux/mshv.h Microsoft Hyper-V /dev/mshv driver
<mailto:linux-hyperv@vger.kernel.org>
0xC0 00-0F linux/usb/iowarrior.h
-0xCA 00-0F uapi/misc/cxl.h
+0xCA 00-0F uapi/misc/cxl.h Dead since 6.15
0xCA 10-2F uapi/misc/ocxl.h
-0xCA 80-BF uapi/scsi/cxlflash_ioctl.h Dead since 6.14
+0xCA 80-BF uapi/scsi/cxlflash_ioctl.h Dead since 6.15
0xCB 00-1F CBM serial IEC bus in development:
<mailto:michael.klein@puffin.lb.shuttle.de>
0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver
diff --git a/Documentation/userspace-api/mseal.rst b/Documentation/userspace-api/mseal.rst
index 41102f74c5e2..1dabfc29be0d 100644
--- a/Documentation/userspace-api/mseal.rst
+++ b/Documentation/userspace-api/mseal.rst
@@ -130,6 +130,27 @@ Use cases
- Chrome browser: protect some security sensitive data structures.
+- System mappings:
+  The system mappings are created by the kernel and include vdso, vvar,
+  vvar_vclock, vectors (arm compat-mode), sigpage (arm compat-mode), uprobes.
+
+  Those system mappings are read-only or execute-only; memory sealing can
+  protect them from ever being changed to writable or unmapped/remapped with
+  different attributes. This is useful to mitigate memory corruption issues
+  where a corrupted pointer is passed to a memory management system.
+
+  If supported by an architecture (CONFIG_ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS),
+  the CONFIG_MSEAL_SYSTEM_MAPPINGS option seals all system mappings of that
+  architecture.
+
+ The following architectures currently support this feature: x86-64, arm64,
+ and s390.
+
+ WARNING: This feature breaks programs which rely on relocating
+ or unmapping system mappings. Known broken software at the time
+ of writing includes CHECKPOINT_RESTORE, UML, gVisor, rr. Therefore
+ this config can't be enabled universally.
+
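The sealing described above is applied by the kernel itself. For comparison,
a userspace process can apply the same protection to one of its own mappings
with the mseal() syscall; a minimal sketch, assuming no libc wrapper and
falling back to the asm-generic syscall number::

    #include <stddef.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_mseal
    #define __NR_mseal 462   /* asm-generic value; assumed fallback */
    #endif

    int main(void)
    {
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;

        /* flags must currently be 0 */
        if (syscall(__NR_mseal, p, len, 0) != 0)
            return 1;

        /* From here on, mprotect(), munmap() and mremap() on this
         * range fail with EPERM. */
        return 0;
    }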
When not to use mseal
=====================
Applications can apply sealing to any virtual memory region from userspace,
diff --git a/MAINTAINERS b/MAINTAINERS
index a2830693af7a..4c5c2e2c1278 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5544,12 +5544,12 @@ F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
F: sound/soc/codecs/cros_ec_codec.*
CHROMEOS EC CHARGE CONTROL
-M: Thomas Weißschuh <thomas@weissschuh.net>
+M: Thomas Weißschuh <linux@weissschuh.net>
S: Maintained
F: drivers/power/supply/cros_charge-control.c
CHROMEOS EC HARDWARE MONITORING
-M: Thomas Weißschuh <thomas@weissschuh.net>
+M: Thomas Weißschuh <linux@weissschuh.net>
L: chrome-platform@lists.linux.dev
L: linux-hwmon@vger.kernel.org
S: Maintained
@@ -5557,7 +5557,7 @@ F: Documentation/hwmon/cros_ec_hwmon.rst
F: drivers/hwmon/cros_ec_hwmon.c
CHROMEOS EC LED DRIVER
-M: Thomas Weißschuh <thomas@weissschuh.net>
+M: Thomas Weißschuh <linux@weissschuh.net>
S: Maintained
F: drivers/leds/leds-cros_ec.c
@@ -5967,9 +5967,10 @@ S: Maintained
F: Documentation/security/snp-tdx-threat-model.rst
CONFIGFS
-M: Joel Becker <jlbec@evilplan.org>
+M: Andreas Hindborg <a.hindborg@kernel.org>
+R: Breno Leitao <leitao@debian.org>
S: Supported
-T: git git://git.infradead.org/users/hch/configfs.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/a.hindborg/linux.git configfs-next
F: fs/configfs/
F: include/linux/configfs.h
F: samples/configfs/
@@ -6435,18 +6436,6 @@ S: Maintained
W: http://www.chelsio.com
F: drivers/net/ethernet/chelsio/cxgb4vf/
-CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
-M: Frederic Barrat <fbarrat@linux.ibm.com>
-M: Andrew Donnellan <ajd@linux.ibm.com>
-L: linuxppc-dev@lists.ozlabs.org
-S: Obsolete
-F: Documentation/ABI/obsolete/sysfs-class-cxl
-F: Documentation/arch/powerpc/cxl.rst
-F: arch/powerpc/platforms/powernv/pci-cxl.c
-F: drivers/misc/cxl/
-F: include/misc/cxl*
-F: include/uapi/misc/cxl.h
-
CYBERPRO FB DRIVER
M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -9118,7 +9107,7 @@ F: include/linux/iomap.h
FILESYSTEMS [NETFS LIBRARY]
M: David Howells <dhowells@redhat.com>
-R: Jeff Layton <jlayton@kernel.org>
+M: Paulo Alcantara <pc@manguebit.com>
L: netfs@lists.linux.dev
L: linux-fsdevel@vger.kernel.org
S: Supported
@@ -10032,7 +10021,7 @@ F: Documentation/hwmon/gigabyte_waterforce.rst
F: drivers/hwmon/gigabyte_waterforce.c
GIGABYTE WMI DRIVER
-M: Thomas Weißschuh <thomas@weissschuh.net>
+M: Thomas Weißschuh <linux@weissschuh.net>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/gigabyte-wmi.c
@@ -15498,6 +15487,45 @@ F: tools/mm/
F: tools/testing/selftests/mm/
N: include/linux/page[-_]*
+MEMORY MANAGEMENT - EXECMEM
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Mike Rapoport <rppt@kernel.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/execmem.h
+F: mm/execmem.c
+
+MEMORY MANAGEMENT - NUMA MEMBLOCKS AND NUMA EMULATION
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Mike Rapoport <rppt@kernel.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/numa_memblks.h
+F: mm/numa.c
+F: mm/numa_emulation.c
+F: mm/numa_memblks.c
+
+MEMORY MANAGEMENT - SECRETMEM
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Mike Rapoport <rppt@kernel.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/secretmem.h
+F: mm/secretmem.c
+
+MEMORY MANAGEMENT - USERFAULTFD
+M: Andrew Morton <akpm@linux-foundation.org>
+R: Peter Xu <peterx@redhat.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: Documentation/admin-guide/mm/userfaultfd.rst
+F: fs/userfaultfd.c
+F: include/asm-generic/pgtable_uffd.h
+F: include/linux/userfaultfd_k.h
+F: include/uapi/linux/userfaultfd.h
+F: mm/userfaultfd.c
+F: tools/testing/selftests/mm/uffd-*.[ch]
+
MEMORY MAPPING
M: Andrew Morton <akpm@linux-foundation.org>
M: Liam R. Howlett <Liam.Howlett@oracle.com>
@@ -16561,6 +16589,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git nand/next
F: drivers/mtd/nand/
F: include/linux/mtd/*nand*.h
+NATIONAL INSTRUMENTS SERIAL DRIVER
+M: Chaitanya Vadrevu <chaitanya.vadrevu@emerson.com>
+L: linux-serial@vger.kernel.org
+S: Maintained
+F: drivers/tty/serial/8250/8250_ni.c
+
NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
M: Daniel Mack <zonque@gmail.com>
L: linux-sound@vger.kernel.org
@@ -19691,12 +19725,12 @@ F: fs/qnx6/
F: include/linux/qnx6_fs.h
QORIQ DPAA2 FSL-MC BUS DRIVER
-M: Stuart Yoder <stuyoder@gmail.com>
-M: Laurentiu Tudor <laurentiu.tudor@nxp.com>
+M: Ioana Ciornei <ioana.ciornei@nxp.com>
+L: linuxppc-dev@lists.ozlabs.org
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/ABI/stable/sysfs-bus-fsl-mc
-F: Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
+F: Documentation/devicetree/bindings/misc/fsl,qoriq-mc.yaml
F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
F: drivers/bus/fsl-mc/
F: include/uapi/linux/fsl_mc.h
@@ -24030,7 +24064,7 @@ F: drivers/thunderbolt/dma_test.c
THUNDERBOLT DRIVER
M: Andreas Noever <andreas.noever@gmail.com>
M: Michael Jamet <michael.jamet@intel.com>
-M: Mika Westerberg <mika.westerberg@linux.intel.com>
+M: Mika Westerberg <westeri@kernel.org>
M: Yehezkel Bernat <YehezkelShB@gmail.com>
L: linux-usb@vger.kernel.org
S: Maintained
@@ -24041,7 +24075,7 @@ F: include/linux/thunderbolt.h
THUNDERBOLT NETWORK DRIVER
M: Michael Jamet <michael.jamet@intel.com>
-M: Mika Westerberg <mika.westerberg@linux.intel.com>
+M: Mika Westerberg <westeri@kernel.org>
M: Yehezkel Bernat <YehezkelShB@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index 3e61073f4b30..b9cd364e814e 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -196,40 +196,44 @@ static const struct tty_operations srmcons_ops = {
static int __init
srmcons_init(void)
{
+ struct tty_driver *driver;
+ int err;
+
timer_setup(&srmcons_singleton.timer, srmcons_receive_chars, 0);
- if (srm_is_registered_console) {
- struct tty_driver *driver;
- int err;
-
- driver = tty_alloc_driver(MAX_SRM_CONSOLE_DEVICES, 0);
- if (IS_ERR(driver))
- return PTR_ERR(driver);
-
- tty_port_init(&srmcons_singleton.port);
-
- driver->driver_name = "srm";
- driver->name = "srm";
- driver->major = 0; /* dynamic */
- driver->minor_start = 0;
- driver->type = TTY_DRIVER_TYPE_SYSTEM;
- driver->subtype = SYSTEM_TYPE_SYSCONS;
- driver->init_termios = tty_std_termios;
- tty_set_operations(driver, &srmcons_ops);
- tty_port_link_device(&srmcons_singleton.port, driver, 0);
- err = tty_register_driver(driver);
- if (err) {
- tty_driver_kref_put(driver);
- tty_port_destroy(&srmcons_singleton.port);
- return err;
- }
- srmcons_driver = driver;
- }
- return -ENODEV;
+ if (!srm_is_registered_console)
+ return -ENODEV;
+
+ driver = tty_alloc_driver(MAX_SRM_CONSOLE_DEVICES, 0);
+ if (IS_ERR(driver))
+ return PTR_ERR(driver);
+
+ tty_port_init(&srmcons_singleton.port);
+
+ driver->driver_name = "srm";
+ driver->name = "srm";
+ driver->major = 0; /* dynamic */
+ driver->minor_start = 0;
+ driver->type = TTY_DRIVER_TYPE_SYSTEM;
+ driver->subtype = SYSTEM_TYPE_SYSCONS;
+ driver->init_termios = tty_std_termios;
+ tty_set_operations(driver, &srmcons_ops);
+ tty_port_link_device(&srmcons_singleton.port, driver, 0);
+ err = tty_register_driver(driver);
+ if (err)
+ goto err_free_drv;
+
+ srmcons_driver = driver;
+
+ return 0;
+err_free_drv:
+ tty_driver_kref_put(driver);
+ tty_port_destroy(&srmcons_singleton.port);
+
+ return err;
}
device_initcall(srmcons_init);
-
/*
* The console driver
*/
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 835b5f100e92..25ed6f1a7c7a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -121,7 +121,7 @@ config ARM
select HAVE_KERNEL_XZ
select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
select HAVE_KRETPROBES if HAVE_KPROBES
- select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_IS_LLD)
+ select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if (LD_VERSION >= 23600 || LD_CAN_USE_KEEP_IN_OVERLAY)
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPTPROBES if !THUMB2_KERNEL
@@ -133,6 +133,7 @@ config ARM
select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
+ select HAVE_RUST if CPU_LITTLE_ENDIAN && CPU_32v7
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 00ca7886b18e..4808d3ed98e4 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -150,6 +150,7 @@ endif
KBUILD_CPPFLAGS +=$(cpp-y)
KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) -Wa,$(arch-y) $(tune-y) -include asm/unified.h -msoft-float
+KBUILD_RUSTFLAGS += --target=arm-unknown-linux-gnueabi
CHECKFLAGS += -D__arm__
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
index d60f6e83a9f7..0341973e30e1 100644
--- a/arch/arm/include/asm/vmlinux.lds.h
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -19,7 +19,7 @@
#endif
#ifdef CONFIG_MMU
-#define ARM_MMU_KEEP(x) x
+#define ARM_MMU_KEEP(x) KEEP(x)
#define ARM_MMU_DISCARD(x)
#else
#define ARM_MMU_KEEP(x)
@@ -34,6 +34,12 @@
#define NOCROSSREFS
#endif
+#ifdef CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY
+#define OVERLAY_KEEP(x) KEEP(x)
+#else
+#define OVERLAY_KEEP(x) x
+#endif
+
/* Set start/end symbol names to the LMA for the section */
#define ARM_LMA(sym, section) \
sym##_start = LOADADDR(section); \
@@ -125,13 +131,13 @@
__vectors_lma = .; \
OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \
.vectors { \
- *(.vectors) \
+ OVERLAY_KEEP(*(.vectors)) \
} \
.vectors.bhb.loop8 { \
- *(.vectors.bhb.loop8) \
+ OVERLAY_KEEP(*(.vectors.bhb.loop8)) \
} \
.vectors.bhb.bpiall { \
- *(.vectors.bhb.bpiall) \
+ OVERLAY_KEEP(*(.vectors.bhb.bpiall)) \
} \
} \
ARM_LMA(__vectors, .vectors); \
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3431c0553f45..50999886a8b5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -551,7 +551,8 @@ void show_ipi_list(struct seq_file *p, int prec)
if (!ipi_desc[i])
continue;
- seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 5eddb75a7174..f2e8d4fac068 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -63,7 +63,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- ARM_MMU_KEEP(KEEP(*(__ex_table)))
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index de373c6c2ae8..d592a203f9c6 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -74,7 +74,7 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
- ARM_MMU_KEEP(KEEP(*(__ex_table)))
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 748c34dc953c..a182295e6f08 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -38,6 +38,7 @@ config ARM64
select ARCH_HAS_KEEPINITRD
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEM_ENCRYPT
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_NONLEAF_PMD_YOUNG if ARM64_HAFT
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index c607e0bf5e0b..d1cc0571798b 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -132,6 +132,7 @@
#define FUJITSU_CPU_PART_A64FX 0x001
#define HISI_CPU_PART_TSV110 0xD01
+#define HISI_CPU_PART_HIP09 0xD02
#define APPLE_CPU_PART_M1_ICESTORM 0x022
#define APPLE_CPU_PART_M1_FIRESTORM 0x023
@@ -218,6 +219,7 @@
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 84f05f781a70..d3b538be1500 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -633,11 +633,6 @@ static inline pud_t pud_mkhuge(pud_t pud)
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
-#define pud_special(pte) pte_special(pud_pte(pud))
-#define pud_mkspecial(pte) pte_pud(pte_mkspecial(pud_pte(pud)))
-#endif
-
#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index d780d1bd2eac..82cf1f879c61 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -109,10 +109,9 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon
int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
- unsigned long dst, src, size;
+ unsigned long dst, size;
dst = regs->regs[dstreg];
- src = regs->regs[srcreg];
size = regs->regs[sizereg];
/*
@@ -129,6 +128,7 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon
}
} else {
/* CPY* instruction */
+ unsigned long src = regs->regs[srcreg];
if (!(option_a ^ wrong_option)) {
/* Format is from Option B */
if (regs->pstate & PSR_N_BIT) {
diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c
index deff21bfa680..b68e1d328d4c 100644
--- a/arch/arm64/kernel/compat_alignment.c
+++ b/arch/arm64/kernel/compat_alignment.c
@@ -368,6 +368,8 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
return 1;
}
+ if (!handler)
+ return 1;
type = handler(addr, instr, regs);
if (type == TYPE_ERROR || type == TYPE_FAULT)
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index d5d11fd11549..b198dde79e59 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -901,6 +901,7 @@ static u8 spectre_bhb_loop_affected(void)
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
+ MIDR_ALL_VERSIONS(MIDR_HISI_HIP09),
{},
};
static const struct midr_range spectre_bhb_k11_list[] = {
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 887ac0b05961..78ddf6bdecad 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -130,7 +130,8 @@ static int __setup_additional_pages(enum vdso_abi abi,
mm->context.vdso = (void *)vdso_base;
ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ|VM_EXEC|gp_flags|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+ VM_SEALED_SYSMAP,
vdso_info[abi].cm);
if (IS_ERR(ret))
goto up_fail;
@@ -256,7 +257,8 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
*/
ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
VM_READ | VM_EXEC |
- VM_MAYREAD | VM_MAYEXEC,
+ VM_MAYREAD | VM_MAYEXEC |
+ VM_SEALED_SYSMAP,
&aarch32_vdso_maps[AA32_MAP_VECTORS]);
return PTR_ERR_OR_ZERO(ret);
@@ -279,7 +281,8 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
*/
ret = _install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD |
- VM_MAYWRITE | VM_MAYEXEC,
+ VM_MAYWRITE | VM_MAYEXEC |
+ VM_SEALED_SYSMAP,
&aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
if (IS_ERR(ret))
goto out;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b98f89420713..ea6695d53fb9 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1361,7 +1361,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
__remove_pgd_mapping(swapper_pg_dir,
__phys_to_virt(start), size);
else {
- max_pfn = PFN_UP(start + size);
+ /* Address of hotplugged memory can be smaller */
+ max_pfn = max(max_pfn, PFN_UP(start + size));
max_low_pfn = max_pfn;
}
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bf8400c28b5a..11055c574968 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -61,11 +61,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return ret;
}
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc(tlb, page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
extern void pagetable_init(void);
extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn);
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h
index 1ee5f5f157ca..937a11ef4c33 100644
--- a/arch/hexagon/include/asm/pgalloc.h
+++ b/arch/hexagon/include/asm/pgalloc.h
@@ -87,10 +87,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
max_kernel_seg = pmdindex;
}
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor((page_ptdesc(pte))); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 687502917ae2..067c0b994648 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -30,6 +30,7 @@ config LOONGARCH
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN
select ARCH_HAS_VDSO_ARCH_DATA
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
@@ -177,7 +178,7 @@ config LOONGARCH
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
- select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
+ select HAVE_VIRT_CPU_ACCOUNTING_GEN
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
select LOCK_MM_AND_FIND_VMA
@@ -387,8 +388,8 @@ config CMDLINE_BOOTLOADER
config CMDLINE_EXTEND
bool "Use built-in to extend bootloader kernel arguments"
help
- The command-line arguments provided during boot will be
- appended to the built-in command line. This is useful in
+ The built-in command line will be appended to the command-
+ line arguments provided during boot. This is useful in
cases where the provided arguments are insufficient and
you don't want to or cannot modify them.
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index c9f564e1d4d9..90f21dfe22b1 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -24,9 +24,9 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_DMEM=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
@@ -665,6 +665,10 @@ CONFIG_RTW88_8723DE=m
CONFIG_RTW88_8723DU=m
CONFIG_RTW88_8821CE=m
CONFIG_RTW88_8821CU=m
+CONFIG_RTW88_8821AU=m
+CONFIG_RTW88_8812AU=m
+CONFIG_RTW88_8814AE=m
+CONFIG_RTW88_8814AU=m
CONFIG_RTW89=m
CONFIG_RTW89_8851BE=m
CONFIG_RTW89_8852AE=m
@@ -748,6 +752,7 @@ CONFIG_MEDIA_PCI_SUPPORT=y
CONFIG_VIDEO_BT848=m
CONFIG_DVB_BT8XX=m
CONFIG_DRM=y
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
CONFIG_DRM_RADEON=m
CONFIG_DRM_RADEON_USERPTR=y
CONFIG_DRM_AMDGPU=m
@@ -761,6 +766,7 @@ CONFIG_DRM_LOONGSON=y
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
+CONFIG_FIRMWARE_EDID=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
# CONFIG_VGA_CONSOLE is not set
@@ -843,6 +849,9 @@ CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_UCSI=m
CONFIG_UCSI_ACPI=m
CONFIG_INFINIBAND=m
+CONFIG_EDAC=y
+# CONFIG_EDAC_LEGACY_SYSFS is not set
+CONFIG_EDAC_LOONGSON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_LOONGSON=y
diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h
index 1b6d09617199..aa622c754414 100644
--- a/arch/loongarch/include/asm/cache.h
+++ b/arch/loongarch/include/asm/cache.h
@@ -8,6 +8,8 @@
#define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define ARCH_DMA_MINALIGN (16)
+
#define __read_mostly __section(".data..read_mostly")
#endif /* _ASM_CACHE_H */
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h
index a0ca84da8541..12bd15578c33 100644
--- a/arch/loongarch/include/asm/irq.h
+++ b/arch/loongarch/include/asm/irq.h
@@ -53,7 +53,7 @@ void spurious_interrupt(void);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
-#define MAX_IO_PICS 2
+#define MAX_IO_PICS 8
#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 7211dff8c969..b58f587f0f0a 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -55,11 +55,8 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
return pte;
}
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#ifndef __PAGETABLE_PMD_FOLDED
diff --git a/arch/loongarch/include/asm/stacktrace.h b/arch/loongarch/include/asm/stacktrace.h
index f23adb15f418..fc8b64773794 100644
--- a/arch/loongarch/include/asm/stacktrace.h
+++ b/arch/loongarch/include/asm/stacktrace.h
@@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <asm/ptrace.h>
#include <asm/loongarch.h>
+#include <asm/unwind_hints.h>
#include <linux/stringify.h>
enum stack_type {
@@ -43,6 +44,7 @@ int get_stack_info(unsigned long stack, struct task_struct *task, struct stack_i
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
__asm__ __volatile__(
+ UNWIND_HINT_SAVE
/* Save $ra */
STORE_ONE_REG(1)
/* Use $ra to save PC */
@@ -80,6 +82,7 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs)
STORE_ONE_REG(29)
STORE_ONE_REG(30)
STORE_ONE_REG(31)
+ UNWIND_HINT_RESTORE
: "=m" (regs->csr_era)
: "r" (regs->regs)
: "memory");
diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h
index a01086ad9dde..2c68bc72736c 100644
--- a/arch/loongarch/include/asm/unwind_hints.h
+++ b/arch/loongarch/include/asm/unwind_hints.h
@@ -23,6 +23,14 @@
UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL
.endm
-#endif /* __ASSEMBLY__ */
+#else /* !__ASSEMBLY__ */
+
+#define UNWIND_HINT_SAVE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0)
+
+#define UNWIND_HINT_RESTORE \
+ UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
+
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
index 2f1f5b08638f..27144de5c5fe 100644
--- a/arch/loongarch/kernel/env.c
+++ b/arch/loongarch/kernel/env.c
@@ -68,6 +68,8 @@ static int __init fdt_cpu_clk_init(void)
return -ENODEV;
clk = of_clk_get(np, 0);
+ of_node_put(np);
+
if (IS_ERR(clk))
return -ENODEV;
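
The added of_node_put() pairs the lookup's reference with a drop as soon as the node is no longer needed. A self-contained sketch of that pattern; the of_find_node_by_name() call is a placeholder, since the lookup that produced np sits outside this hunk:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/of.h>

	static int example_fdt_cpu_clk_lookup(void)
	{
		struct device_node *np;
		struct clk *clk;

		np = of_find_node_by_name(NULL, "cpu");	/* placeholder; takes a reference on np */
		if (!np)
			return -ENODEV;

		clk = of_clk_get(np, 0);
		of_node_put(np);			/* drop it once the clock handle is resolved */

		if (IS_ERR(clk))
			return -ENODEV;

		clk_put(clk);				/* balances of_clk_get() in this sketch */
		return 0;
	}
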
diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c
index 445c452d72a7..7be5b4c0c900 100644
--- a/arch/loongarch/kernel/kgdb.c
+++ b/arch/loongarch/kernel/kgdb.c
@@ -8,6 +8,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
+#include <linux/objtool.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@@ -224,13 +225,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->csr_era = pc;
}
-void arch_kgdb_breakpoint(void)
+noinline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ( \
".globl kgdb_breakinst\n\t" \
- "nop\n" \
"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}
+STACK_FRAME_NON_STANDARD(arch_kgdb_breakpoint);
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index ea357a3edc09..fa1500d4aa3e 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -142,6 +142,8 @@ static void build_prologue(struct jit_ctx *ctx)
*/
if (seen_tail_call(ctx) && seen_call(ctx))
move_reg(ctx, TCC_SAVED, REG_TCC);
+ else
+ emit_insn(ctx, nop);
ctx->stack_size = stack_adjust;
}
@@ -905,7 +907,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
move_addr(ctx, t1, func_addr);
emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
- move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+
break;
/* tail call */
@@ -930,7 +935,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
{
const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
- move_imm(ctx, dst, imm64, is32);
+ if (bpf_pseudo_func(insn))
+ move_addr(ctx, dst, imm64);
+ else
+ move_imm(ctx, dst, imm64, is32);
return 1;
}
diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
index 68586338ecf8..f9c569f53949 100644
--- a/arch/loongarch/net/bpf_jit.h
+++ b/arch/loongarch/net/bpf_jit.h
@@ -27,6 +27,11 @@ struct jit_data {
struct jit_ctx ctx;
};
+static inline void emit_nop(union loongarch_instruction *insn)
+{
+ insn->word = INSN_NOP;
+}
+
#define emit_insn(ctx, func, ...) \
do { \
if (ctx->image != NULL) { \
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index 1c26147aff70..ccd2c5e135c6 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -36,8 +36,7 @@ endif
# VDSO linker flags.
ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
- $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
- --hash-style=sysv --build-id -T
+ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared --build-id -T
#
# Shared build commands.
diff --git a/arch/loongarch/vdso/vgetrandom-chacha.S b/arch/loongarch/vdso/vgetrandom-chacha.S
index c2733e6c3a8d..c4dd2bab8825 100644
--- a/arch/loongarch/vdso/vgetrandom-chacha.S
+++ b/arch/loongarch/vdso/vgetrandom-chacha.S
@@ -58,9 +58,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
#define copy0 t5
#define copy1 t6
#define copy2 t7
-
-/* Reuse i as copy3 */
-#define copy3 i
+#define copy3 t8
/* Packs to be used with OP_4REG */
#define line0 state0, state1, state2, state3
@@ -99,6 +97,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
li.w copy0, 0x61707865
li.w copy1, 0x3320646e
li.w copy2, 0x79622d32
+ li.w copy3, 0x6b206574
ld.w cnt_lo, counter, 0
ld.w cnt_hi, counter, 4
@@ -108,7 +107,7 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
move state0, copy0
move state1, copy1
move state2, copy2
- li.w state3, 0x6b206574
+ move state3, copy3
/* state[4,5,..,11] = key */
ld.w state4, key, 0
@@ -167,12 +166,6 @@ SYM_FUNC_START(__arch_chacha20_blocks_nostack)
addi.w i, i, -1
bnez i, .Lpermute
- /*
- * copy[3] = "expa", materialize it here because copy[3] shares the
- * same register with i which just became dead.
- */
- li.w copy3, 0x6b206574
-
/* output[0,1,2,3] = copy[0,1,2,3] + state[0,1,2,3] */
OP_4REG add.w line0, copy
st.w state0, output, 0
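
Aside, not part of the patch: the four immediates now loaded up front into copy0..copy3 are the standard ChaCha20 constant words ("expand 32-byte k"); the change simply gives the fourth word its own register (t8) instead of rematerializing it after the loop counter dies. The same constants written out in C:

	/* "expand 32-byte k" as four little-endian 32-bit words */
	static const unsigned int chacha20_consts[4] = {
		0x61707865,	/* "expa" */
		0x3320646e,	/* "nd 3" */
		0x79622d32,	/* "2-by" */
		0x6b206574,	/* "te k" */
	};
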
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 80afc3a18724..1e21c758b774 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -17,11 +17,8 @@
extern const char bad_pmd_string[];
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 65f0d1fb8a2a..31d475cdb1c5 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -118,7 +118,7 @@ int page_is_ram(unsigned long pfn)
/*
* Check for command-line options that affect what MMU_init will do.
*/
-static void mm_cmdline_setup(void)
+static void __init mm_cmdline_setup(void)
{
unsigned long maxmem = 0;
char *p = cmd_line;
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 26c7a6ede983..bbca420c96d3 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -48,11 +48,8 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
extern void pgd_init(void *addr);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#ifndef __PAGETABLE_PMD_FOLDED
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index 12a536b7bfbd..db122b093a8b 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -28,10 +28,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, addr) \
- do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
- } while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif /* _ASM_NIOS2_PGALLOC_H */
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 3372f4e6ab4b..3f110931d8f6 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -64,10 +64,7 @@ extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 1eb446452fc0..3086c4a12d6d 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -78,7 +78,6 @@ CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_NVME=m
CONFIG_NVME_MULTIPATH=y
CONFIG_EEPROM_AT24=m
-# CONFIG_CXL is not set
# CONFIG_OCXL is not set
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
index fd2e166ea02a..81bd176203ab 100644
--- a/arch/powerpc/include/asm/copro.h
+++ b/arch/powerpc/include/asm/copro.h
@@ -18,10 +18,4 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
-
-#ifdef CONFIG_PPC_COPRO_BASE
-void copro_flush_all_slbs(struct mm_struct *mm);
-#else
-static inline void copro_flush_all_slbs(struct mm_struct *mm) {}
-#endif
#endif /* _ASM_POWERPC_COPRO_H */
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 47ed639f3b8f..a4dc27655b3e 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -38,9 +38,6 @@ struct dev_archdata {
#ifdef CONFIG_FAIL_IOMMU
int fail_iommu;
#endif
-#ifdef CONFIG_CXL_BASE
- struct cxl_context *cxl_ctx;
-#endif
#ifdef CONFIG_PCI_IOV
void *iov_data;
#endif
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index 8afc92860dbb..7e9a479951a3 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -10,7 +10,6 @@
#include <linux/pci_hotplug.h>
#include <linux/irq.h>
#include <linux/of.h>
-#include <misc/cxl-base.h>
#include <asm/opal-api.h>
#define PCI_SLOT_ID_PREFIX (1UL << 63)
@@ -25,25 +24,9 @@ extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state);
extern int pnv_pci_set_power_state(uint64_t id, uint8_t state,
struct opal_msg *msg);
-extern int pnv_pci_set_tunnel_bar(struct pci_dev *dev, uint64_t addr,
- int enable);
-int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
-int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
- unsigned int virq);
-int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
-void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
-int pnv_cxl_get_irq_count(struct pci_dev *dev);
-struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
int64_t pnv_opal_pci_msi_eoi(struct irq_data *d);
bool is_pnv_opal_msi(struct irq_chip *chip);
-#ifdef CONFIG_CXL_BASE
-int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev, int num);
-void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev);
-#endif
-
struct pnv_php_slot {
struct hotplug_slot slot;
uint64_t id;
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index 430d1d935a7c..e9e2dd70c060 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -27,8 +27,6 @@
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>
-#include <misc/cxl-base.h>
-
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
@@ -217,11 +215,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
static inline void tlbie(unsigned long vpn, int psize, int apsize,
int ssize, int local)
{
- unsigned int use_local;
+ unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
- use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();
-
if (use_local)
use_local = mmu_psize_defs[psize].tlbiel;
if (lock_tlbie && !use_local)
@@ -789,10 +785,6 @@ static void native_flush_hash_range(unsigned long number, int local)
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
- unsigned int use_local;
-
- use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
- mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
local_irq_save(flags);
@@ -827,7 +819,8 @@ static void native_flush_hash_range(unsigned long number, int local)
} pte_iterate_hashed_end();
}
- if (use_local) {
+ if (mmu_has_feature(MMU_FTR_TLBIEL) &&
+ mmu_psize_defs[psize].tlbiel && local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 734610052cf4..5158aefe4873 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -56,7 +56,7 @@
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
-#include <asm/copro.h>
+#include <asm/spu.h>
#include <asm/udbg.h>
#include <asm/text-patching.h>
#include <asm/fadump.h>
@@ -1600,7 +1600,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
return;
slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
- copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
copy_mm_to_paca(mm);
@@ -1869,7 +1871,9 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
"to 4kB pages because of "
"non-cacheable mapping\n");
psize = mmu_vmalloc_psize = MMU_PAGE_4K;
- copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
}
}
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index c0c45d033cba..8f7d41ce2ca1 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -10,7 +10,6 @@
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>
-#include <misc/cxl-base.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
index bc9a39821d1c..28bec5bc7879 100644
--- a/arch/powerpc/mm/book3s64/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -22,7 +22,7 @@
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
-#include <asm/copro.h>
+#include <asm/spu.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>
@@ -248,7 +248,9 @@ static void slice_convert(struct mm_struct *mm,
spin_unlock_irqrestore(&slice_convert_lock, flags);
- copro_flush_all_slbs(mm);
+#ifdef CONFIG_SPU_BASE
+ spu_flush_all_slbs(mm);
+#endif
}
/*
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index f49fd873df8d..f5f8692e2c69 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -12,8 +12,6 @@
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
-#include <asm/spu.h>
-#include <misc/cxl-base.h>
/*
* This ought to be kept in sync with the powerpc specific do_page_fault
@@ -135,13 +133,4 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);
-
-void copro_flush_all_slbs(struct mm_struct *mm)
-{
-#ifdef CONFIG_SPU_BASE
- spu_flush_all_slbs(mm);
-#endif
- cxl_slbia(mm);
-}
-EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
#endif
diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c
index 827d338deaf4..2c2999de6bfa 100644
--- a/arch/powerpc/platforms/cell/spufs/gang.c
+++ b/arch/powerpc/platforms/cell/spufs/gang.c
@@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void)
mutex_init(&gang->aff_mutex);
INIT_LIST_HEAD(&gang->list);
INIT_LIST_HEAD(&gang->aff_list_head);
+ gang->alive = 1;
out:
return gang;
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 70236d1df3d3..9f9e4b871627 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -192,13 +192,32 @@ static int spufs_fill_dir(struct dentry *dir,
return -ENOMEM;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
- if (ret)
+ if (ret) {
+ dput(dentry);
return ret;
+ }
files++;
}
return 0;
}
+static void unuse_gang(struct dentry *dir)
+{
+ struct inode *inode = dir->d_inode;
+ struct spu_gang *gang = SPUFS_I(inode)->i_gang;
+
+ if (gang) {
+ bool dead;
+
+ inode_lock(inode); // exclusion with spufs_create_context()
+ dead = !--gang->alive;
+ inode_unlock(inode);
+
+ if (dead)
+ simple_recursive_removal(dir, NULL);
+ }
+}
+
static int spufs_dir_close(struct inode *inode, struct file *file)
{
struct inode *parent;
@@ -213,6 +232,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
inode_unlock(parent);
WARN_ON(ret);
+ unuse_gang(dir->d_parent);
return dcache_dir_close(inode, file);
}
@@ -405,7 +425,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
{
int ret;
int affinity;
- struct spu_gang *gang;
+ struct spu_gang *gang = SPUFS_I(inode)->i_gang;
struct spu_context *neighbor;
struct path path = {.mnt = mnt, .dentry = dentry};
@@ -420,11 +440,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
return -ENODEV;
- gang = NULL;
+ if (gang) {
+ if (!gang->alive)
+ return -ENOENT;
+ gang->alive++;
+ }
+
neighbor = NULL;
affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
if (affinity) {
- gang = SPUFS_I(inode)->i_gang;
if (!gang)
return -EINVAL;
mutex_lock(&gang->aff_mutex);
@@ -436,8 +460,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
}
ret = spufs_mkdir(inode, dentry, flags, mode & 0777);
- if (ret)
+ if (ret) {
+ if (neighbor)
+ put_spu_context(neighbor);
goto out_aff_unlock;
+ }
if (affinity) {
spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx,
@@ -453,6 +480,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
out_aff_unlock:
if (affinity)
mutex_unlock(&gang->aff_mutex);
+ if (ret && gang)
+ gang->alive--; // can't reach 0
return ret;
}
@@ -482,6 +511,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode);
+ dget(dentry);
inc_nlink(dir);
inc_nlink(d_inode(dentry));
return ret;
@@ -492,6 +522,21 @@ out:
return ret;
}
+static int spufs_gang_close(struct inode *inode, struct file *file)
+{
+ unuse_gang(file->f_path.dentry);
+ return dcache_dir_close(inode, file);
+}
+
+static const struct file_operations spufs_gang_fops = {
+ .open = dcache_dir_open,
+ .release = spufs_gang_close,
+ .llseek = dcache_dir_lseek,
+ .read = generic_read_dir,
+ .iterate_shared = dcache_readdir,
+ .fsync = noop_fsync,
+};
+
static int spufs_gang_open(const struct path *path)
{
int ret;
@@ -511,7 +556,7 @@ static int spufs_gang_open(const struct path *path)
return PTR_ERR(filp);
}
- filp->f_op = &simple_dir_operations;
+ filp->f_op = &spufs_gang_fops;
fd_install(ret, filp);
return ret;
}
@@ -526,10 +571,8 @@ static int spufs_create_gang(struct inode *inode,
ret = spufs_mkgang(inode, dentry, mode & 0777);
if (!ret) {
ret = spufs_gang_open(&path);
- if (ret < 0) {
- int err = simple_rmdir(inode, dentry);
- WARN_ON(err);
- }
+ if (ret < 0)
+ unuse_gang(dentry);
}
return ret;
}
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 84958487f696..d33787c57c39 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -151,6 +151,8 @@ struct spu_gang {
int aff_flags;
struct spu *aff_ref_spu;
atomic_t aff_sched_count;
+
+ int alive;
};
/* Flag bits for spu_gang aff_flags */
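
A condensed view of the lifetime rule the new field implements, pieced together from the spufs inode.c hunks above (a summary for readability, not additional code in the patch):

	/*
	 * Lifetime of a spufs gang directory:
	 *
	 *	alloc_spu_gang():	gang->alive = 1;	// the gang dir itself
	 *	spufs_create_context():	gang->alive++;		// each child context;
	 *							// rejected with -ENOENT once dead
	 *	unuse_gang():		if (!--gang->alive)
	 *					simple_recursive_removal(dir, NULL);
	 *
	 * unuse_gang() is reached from spufs_dir_close() on the parent directory
	 * and from the new spufs_gang_close(), so the directory tree is removed
	 * once the last opener and the last context are gone.
	 */
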
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 19f0fc5c6f1b..9e5d0c847ee2 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o
obj-$(CONFIG_OPAL_CORE) += opal-core.o
obj-$(CONFIG_PCI) += pci.o pci-ioda.o pci-ioda-tce.o
obj-$(CONFIG_PCI_IOV) += pci-sriov.o
-obj-$(CONFIG_CXL_BASE) += pci-cxl.o
obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_OPAL_PRD) += opal-prd.o
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
deleted file mode 100644
index 7e419de71db8..000000000000
--- a/arch/powerpc/platforms/powernv/pci-cxl.c
+++ /dev/null
@@ -1,153 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014-2016 IBM Corp.
- */
-
-#include <linux/module.h>
-#include <misc/cxl-base.h>
-#include <asm/pnv-pci.h>
-#include <asm/opal.h>
-
-#include "pci.h"
-
-int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct pnv_ioda_pe *pe;
- int rc;
-
- pe = pnv_ioda_get_pe(dev);
- if (!pe)
- return -ENODEV;
-
- pe_info(pe, "Switching PHB to CXL\n");
-
- rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number);
- if (rc == OPAL_UNSUPPORTED)
- dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n");
- else if (rc)
- dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc);
-
- return rc;
-}
-EXPORT_SYMBOL(pnv_phb_to_cxl_mode);
-
-/* Find PHB for cxl dev and allocate MSI hwirqs?
- * Returns the absolute hardware IRQ number
- */
-int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num);
-
- if (hwirq < 0) {
- dev_warn(&dev->dev, "Failed to find a free MSI\n");
- return -ENOSPC;
- }
-
- return phb->msi_base + hwirq;
-}
-EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs);
-
-void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num);
-}
-EXPORT_SYMBOL(pnv_cxl_release_hwirqs);
-
-void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int i, hwirq;
-
- for (i = 1; i < CXL_IRQ_RANGES; i++) {
- if (!irqs->range[i])
- continue;
- pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n",
- i, irqs->offset[i],
- irqs->range[i]);
- hwirq = irqs->offset[i] - phb->msi_base;
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq,
- irqs->range[i]);
- }
-}
-EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges);
-
-int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs,
- struct pci_dev *dev, int num)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- int i, hwirq, try;
-
- memset(irqs, 0, sizeof(struct cxl_irq_ranges));
-
- /* 0 is reserved for the multiplexed PSL DSI interrupt */
- for (i = 1; i < CXL_IRQ_RANGES && num; i++) {
- try = num;
- while (try) {
- hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try);
- if (hwirq >= 0)
- break;
- try /= 2;
- }
- if (!try)
- goto fail;
-
- irqs->offset[i] = phb->msi_base + hwirq;
- irqs->range[i] = try;
- pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n",
- i, irqs->offset[i], irqs->range[i]);
- num -= try;
- }
- if (num)
- goto fail;
-
- return 0;
-fail:
- pnv_cxl_release_hwirq_ranges(irqs, dev);
- return -ENOSPC;
-}
-EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges);
-
-int pnv_cxl_get_irq_count(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- return phb->msi_bmp.irq_count;
-}
-EXPORT_SYMBOL(pnv_cxl_get_irq_count);
-
-int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
- unsigned int virq)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
- unsigned int xive_num = hwirq - phb->msi_base;
- struct pnv_ioda_pe *pe;
- int rc;
-
- if (!(pe = pnv_ioda_get_pe(dev)))
- return -ENODEV;
-
- /* Assign XIVE to PE */
- rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
- if (rc) {
- pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x "
- "hwirq 0x%x XIVE 0x%x PE\n",
- pci_name(dev), rc, phb->msi_base, hwirq, xive_num);
- return -EIO;
- }
- pnv_set_msi_irq_chip(phb, virq);
-
- return 0;
-}
-EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index b0a14e48175c..d2a8e0287811 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -39,8 +39,6 @@
#include <asm/mmzone.h>
#include <asm/xive.h>
-#include <misc/cxl-base.h>
-
#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"
@@ -1636,47 +1634,6 @@ int64_t pnv_opal_pci_msi_eoi(struct irq_data *d)
return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq);
}
-/*
- * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers
- */
-static void pnv_ioda2_msi_eoi(struct irq_data *d)
-{
- int64_t rc;
- unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
- struct pci_controller *hose = irq_data_get_irq_chip_data(d);
- struct pnv_phb *phb = hose->private_data;
-
- rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
- WARN_ON_ONCE(rc);
-
- icp_native_eoi(d);
-}
-
-/* P8/CXL only */
-void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
-{
- struct irq_data *idata;
- struct irq_chip *ichip;
-
- /* The MSI EOI OPAL call is only needed on PHB3 */
- if (phb->model != PNV_PHB_MODEL_PHB3)
- return;
-
- if (!phb->ioda.irq_chip_init) {
- /*
- * First time we setup an MSI IRQ, we need to setup the
- * corresponding IRQ chip to route correctly.
- */
- idata = irq_get_irq_data(virq);
- ichip = irq_data_get_irq_chip(idata);
- phb->ioda.irq_chip_init = 1;
- phb->ioda.irq_chip = *ichip;
- phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
- }
- irq_set_chip(virq, &phb->ioda.irq_chip);
- irq_set_chip_data(virq, phb->hose);
-}
-
static struct irq_chip pnv_pci_msi_irq_chip;
/*
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 35f566aa0424..b2c1da025410 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
-#include <linux/sched/mm.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -33,8 +32,6 @@
#include "powernv.h"
#include "pci.h"
-static DEFINE_MUTEX(tunnel_mutex);
-
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
struct device_node *node = np;
@@ -744,64 +741,6 @@ struct iommu_table *pnv_pci_table_alloc(int nid)
return tbl;
}
-struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
-{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
-
- return of_node_get(hose->dn);
-}
-EXPORT_SYMBOL(pnv_pci_get_phb_node);
-
-int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
-{
- struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
- u64 tunnel_bar;
- __be64 val;
- int rc;
-
- if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
- return -ENXIO;
- if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
- return -ENXIO;
-
- mutex_lock(&tunnel_mutex);
- rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
- if (rc != OPAL_SUCCESS) {
- rc = -EIO;
- goto out;
- }
- tunnel_bar = be64_to_cpu(val);
- if (enable) {
- /*
- * Only one device per PHB can use atomics.
- * Our policy is first-come, first-served.
- */
- if (tunnel_bar) {
- if (tunnel_bar != addr)
- rc = -EBUSY;
- else
- rc = 0; /* Setting same address twice is ok */
- goto out;
- }
- } else {
- /*
- * The device that owns atomics and wants to release
- * them must pass the same address with enable == 0.
- */
- if (tunnel_bar != addr) {
- rc = -EPERM;
- goto out;
- }
- addr = 0x0ULL;
- }
- rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
- rc = opal_error_code(rc);
-out:
- mutex_unlock(&tunnel_mutex);
- return rc;
-}
-EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
-
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 93fba1f8661f..42075501663b 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -163,7 +163,6 @@ struct pnv_phb {
unsigned int *io_segmap;
/* IRQ chip */
- int irq_chip_init;
struct irq_chip irq_chip;
/* Sorted list of used PE's based
@@ -281,7 +280,6 @@ extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
extern struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
-extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
__u64 window_size, __u32 levels);
extern int pnv_eeh_post_init(void);
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 3e2aebea6312..770ce18a7328 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -15,24 +15,6 @@
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
-/*
- * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
- * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
- * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
- * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
- * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
- * for more details.
- */
-static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
-{
- if (riscv_use_sbi_for_rfence()) {
- tlb_remove_ptdesc(tlb, pt);
- } else {
- pagetable_dtor(pt);
- tlb_remove_page_ptdesc(tlb, pt);
- }
-}
-
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
@@ -108,14 +90,14 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr)
{
if (pgtable_l4_enabled)
- riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr)
{
if (pgtable_l5_enabled)
- riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -143,7 +125,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long addr)
{
- riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
#endif /* __PAGETABLE_PMD_FOLDED */
@@ -151,7 +133,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
- riscv_tlb_remove_ptdesc(tlb, page_ptdesc(pte));
+ tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#endif /* CONFIG_MMU */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c809c486d136..b8fa367c1fc9 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -137,6 +137,7 @@ config S390
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && CC_IS_CLANG
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_BUILTIN_BSWAP
diff --git a/arch/s390/hypfs/hypfs_diag_fs.c b/arch/s390/hypfs/hypfs_diag_fs.c
index 1e17e288cee4..ede951dc0085 100644
--- a/arch/s390/hypfs/hypfs_diag_fs.c
+++ b/arch/s390/hypfs/hypfs_diag_fs.c
@@ -209,6 +209,8 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(),
cpu_info));
cpu_dir = hypfs_mkdir(cpus_dir, buffer);
+ if (IS_ERR(cpu_dir))
+ return PTR_ERR(cpu_dir);
rc = hypfs_create_u64(cpu_dir, "mgmtime",
cpu_info__acc_time(diag204_get_info_type(), cpu_info) -
cpu_info__lp_time(diag204_get_info_type(), cpu_info));
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 70c8f9ad13cd..430feb1a5013 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -80,7 +80,7 @@ static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
vdso_text_start = vvar_start + VDSO_NR_PAGES * PAGE_SIZE;
/* VM_MAYWRITE for COW so gdb can set breakpoints */
vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
- VM_READ|VM_EXEC|
+ VM_READ|VM_EXEC|VM_SEALED_SYSMAP|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdso_mapping);
if (IS_ERR(vma)) {
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 96d938fdf224..6fe7123d38fa 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -32,10 +32,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define __pte_free_tlb(tlb, pte, addr) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, addr) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#endif /* __ASM_SH_PGALLOC_H */
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 18051b1cfce0..79509c7f39de 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -12,6 +12,7 @@ config UML
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
+ select ARCH_HAS_STRICT_KERNEL_RWX
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index ede40a160c5e..9cb196070614 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -345,16 +345,20 @@ config UML_RTC
by providing a fake RTC clock that causes a wakeup at the right
time.
-config UML_PCI_OVER_VIRTIO
- bool "Enable PCI over VIRTIO device simulation"
- # in theory, just VIRTIO is enough, but that causes recursion
- depends on VIRTIO_UML
+config UML_PCI
+ bool
select FORCE_PCI
select UML_IOMEM_EMULATION
select UML_DMA_EMULATION
select PCI_MSI
select PCI_LOCKLESS_CONFIG
+config UML_PCI_OVER_VIRTIO
+ bool "Enable PCI over VIRTIO device simulation"
+ # in theory, just VIRTIO is enough, but that causes recursion
+ depends on VIRTIO_UML
+ select UML_PCI
+
config UML_PCI_OVER_VIRTIO_DEVICE_ID
int "set the virtio device ID for PCI emulation"
default -1
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index 57882e6bc215..0a5820343ad3 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -60,7 +60,8 @@ obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
obj-$(CONFIG_UML_RANDOM) += random.o
obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o
obj-$(CONFIG_UML_RTC) += rtc.o
-obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virt-pci.o
+obj-$(CONFIG_UML_PCI) += virt-pci.o
+obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virtio_pcidev.o
# pcap_user.o must be added explicitly.
USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o vde_user.o vector_user.o
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index da985e0dc69a..ca08c91f47a3 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -79,7 +79,7 @@ static int __init rng_init (void)
if (err < 0)
goto err_out_cleanup_hw;
- sigio_broken(random_fd);
+ sigio_broken();
hwrng.name = RNG_MODULE_NAME;
hwrng.read = rng_dev_read;
diff --git a/arch/um/drivers/rtc_user.c b/arch/um/drivers/rtc_user.c
index 7c3cec4c68cf..51e79f3148cd 100644
--- a/arch/um/drivers/rtc_user.c
+++ b/arch/um/drivers/rtc_user.c
@@ -39,7 +39,7 @@ int uml_rtc_start(bool timetravel)
}
/* apparently timerfd won't send SIGIO, use workaround */
- sigio_broken(uml_rtc_irq_fds[0]);
+ sigio_broken();
err = add_sigio_fd(uml_rtc_irq_fds[0]);
if (err < 0) {
close(uml_rtc_irq_fds[0]);
diff --git a/arch/um/drivers/ubd.h b/arch/um/drivers/ubd.h
index f016fe15499f..2985c14661f4 100644
--- a/arch/um/drivers/ubd.h
+++ b/arch/um/drivers/ubd.h
@@ -7,8 +7,10 @@
#ifndef __UM_UBD_USER_H
#define __UM_UBD_USER_H
-extern int start_io_thread(unsigned long sp, int *fds_out);
-extern int io_thread(void *arg);
+#include <os.h>
+
+int start_io_thread(struct os_helper_thread **td_out, int *fd_out);
+void *io_thread(void *arg);
extern int kernel_fd;
extern int ubd_read_poll(int timeout);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 0b1e61f72fb3..4de6613e7468 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -474,12 +474,12 @@ static irqreturn_t ubd_intr(int irq, void *dev)
}
/* Only changed by ubd_init, which is an initcall. */
-static int io_pid = -1;
+static struct os_helper_thread *io_td;
static void kill_io_thread(void)
{
- if(io_pid != -1)
- os_kill_process(io_pid, 1);
+ if (io_td)
+ os_kill_helper_thread(io_td);
}
__uml_exitcall(kill_io_thread);
@@ -1104,8 +1104,8 @@ static int __init ubd_init(void)
late_initcall(ubd_init);
-static int __init ubd_driver_init(void){
- unsigned long stack;
+static int __init ubd_driver_init(void)
+{
int err;
/* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/
@@ -1114,13 +1114,11 @@ static int __init ubd_driver_init(void){
/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
* enough. So use anyway the io thread. */
}
- stack = alloc_stack(0, 0);
- io_pid = start_io_thread(stack + PAGE_SIZE, &thread_fd);
- if(io_pid < 0){
+ err = start_io_thread(&io_td, &thread_fd);
+ if (err < 0) {
printk(KERN_ERR
"ubd : Failed to start I/O thread (errno = %d) - "
- "falling back to synchronous I/O\n", -io_pid);
- io_pid = -1;
+ "falling back to synchronous I/O\n", -err);
return 0;
}
err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr,
@@ -1496,12 +1494,11 @@ int kernel_fd = -1;
/* Only changed by the io thread. XXX: currently unused. */
static int io_count;
-int io_thread(void *arg)
+void *io_thread(void *arg)
{
int n, count, written, res;
- os_set_pdeathsig();
- os_fix_helper_signals();
+ os_fix_helper_thread_signals();
while(1){
n = bulk_req_safe_read(
@@ -1543,5 +1540,5 @@ int io_thread(void *arg)
} while (written < n);
}
- return 0;
+ return NULL;
}
diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c
index b4f8b8e60564..c5e6545f6fcf 100644
--- a/arch/um/drivers/ubd_user.c
+++ b/arch/um/drivers/ubd_user.c
@@ -25,9 +25,9 @@
static struct pollfd kernel_pollfd;
-int start_io_thread(unsigned long sp, int *fd_out)
+int start_io_thread(struct os_helper_thread **td_out, int *fd_out)
{
- int pid, fds[2], err;
+ int fds[2], err;
err = os_pipe(fds, 1, 1);
if(err < 0){
@@ -47,14 +47,14 @@ int start_io_thread(unsigned long sp, int *fd_out)
goto out_close;
}
- pid = clone(io_thread, (void *) sp, CLONE_FILES | CLONE_VM, NULL);
- if(pid < 0){
- err = -errno;
- printk("start_io_thread - clone failed : errno = %d\n", errno);
+ err = os_run_helper_thread(td_out, io_thread, NULL);
+ if (err < 0) {
+ printk("%s - failed to run helper thread, err = %d\n",
+ __func__, -err);
goto out_close;
}
- return(pid);
+ return 0;
out_close:
os_close_file(fds[0]);
diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
index dd5580f975cc..b83b5a765d4e 100644
--- a/arch/um/drivers/virt-pci.c
+++ b/arch/um/drivers/virt-pci.c
@@ -5,52 +5,19 @@
*/
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
#include <linux/logic_iomem.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
-#include <linux/virtio_pcidev.h>
-#include <linux/virtio-uml.h>
-#include <linux/delay.h>
#include <linux/msi.h>
#include <linux/unaligned.h>
#include <irq_kern.h>
+#include "virt-pci.h"
+
#define MAX_DEVICES 8
#define MAX_MSI_VECTORS 32
#define CFG_SPACE_SIZE 4096
-/* for MSI-X we have a 32-bit payload */
-#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
-#define NUM_IRQ_MSGS 10
-
-struct um_pci_message_buffer {
- struct virtio_pcidev_msg hdr;
- u8 data[8];
-};
-
-struct um_pci_device {
- struct virtio_device *vdev;
-
- /* for now just standard BARs */
- u8 resptr[PCI_STD_NUM_BARS];
-
- struct virtqueue *cmd_vq, *irq_vq;
-
-#define UM_PCI_WRITE_BUFS 20
- struct um_pci_message_buffer bufs[UM_PCI_WRITE_BUFS + 1];
- void *extra_ptrs[UM_PCI_WRITE_BUFS + 1];
- DECLARE_BITMAP(used_bufs, UM_PCI_WRITE_BUFS);
-
-#define UM_PCI_STAT_WAITING 0
- unsigned long status;
-
- int irq;
-
- bool platform;
-};
-
struct um_pci_device_reg {
struct um_pci_device *dev;
void __iomem *iomem;
@@ -65,179 +32,15 @@ static struct irq_domain *um_pci_inner_domain;
static struct irq_domain *um_pci_msi_domain;
static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)];
-static unsigned int um_pci_max_delay_us = 40000;
-module_param_named(max_delay_us, um_pci_max_delay_us, uint, 0644);
-
-static int um_pci_get_buf(struct um_pci_device *dev, bool *posted)
-{
- int i;
-
- for (i = 0; i < UM_PCI_WRITE_BUFS; i++) {
- if (!test_and_set_bit(i, dev->used_bufs))
- return i;
- }
-
- *posted = false;
- return UM_PCI_WRITE_BUFS;
-}
-
-static void um_pci_free_buf(struct um_pci_device *dev, void *buf)
-{
- int i;
-
- if (buf == &dev->bufs[UM_PCI_WRITE_BUFS]) {
- kfree(dev->extra_ptrs[UM_PCI_WRITE_BUFS]);
- dev->extra_ptrs[UM_PCI_WRITE_BUFS] = NULL;
- return;
- }
-
- for (i = 0; i < UM_PCI_WRITE_BUFS; i++) {
- if (buf == &dev->bufs[i]) {
- kfree(dev->extra_ptrs[i]);
- dev->extra_ptrs[i] = NULL;
- WARN_ON(!test_and_clear_bit(i, dev->used_bufs));
- return;
- }
- }
-
- WARN_ON(1);
-}
-
-static int um_pci_send_cmd(struct um_pci_device *dev,
- struct virtio_pcidev_msg *cmd,
- unsigned int cmd_size,
- const void *extra, unsigned int extra_size,
- void *out, unsigned int out_size)
-{
- struct scatterlist out_sg, extra_sg, in_sg;
- struct scatterlist *sgs_list[] = {
- [0] = &out_sg,
- [1] = extra ? &extra_sg : &in_sg,
- [2] = extra ? &in_sg : NULL,
- };
- struct um_pci_message_buffer *buf;
- int delay_count = 0;
- bool bounce_out;
- int ret, len;
- int buf_idx;
- bool posted;
-
- if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
- return -EINVAL;
-
- switch (cmd->op) {
- case VIRTIO_PCIDEV_OP_CFG_WRITE:
- case VIRTIO_PCIDEV_OP_MMIO_WRITE:
- case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
- /* in PCI, writes are posted, so don't wait */
- posted = !out;
- WARN_ON(!posted);
- break;
- default:
- posted = false;
- break;
- }
-
- bounce_out = !posted && cmd_size <= sizeof(*cmd) &&
- out && out_size <= sizeof(buf->data);
-
- buf_idx = um_pci_get_buf(dev, &posted);
- buf = &dev->bufs[buf_idx];
- memcpy(buf, cmd, cmd_size);
-
- if (posted && extra && extra_size > sizeof(buf) - cmd_size) {
- dev->extra_ptrs[buf_idx] = kmemdup(extra, extra_size,
- GFP_ATOMIC);
-
- if (!dev->extra_ptrs[buf_idx]) {
- um_pci_free_buf(dev, buf);
- return -ENOMEM;
- }
- extra = dev->extra_ptrs[buf_idx];
- } else if (extra && extra_size <= sizeof(buf) - cmd_size) {
- memcpy((u8 *)buf + cmd_size, extra, extra_size);
- cmd_size += extra_size;
- extra_size = 0;
- extra = NULL;
- cmd = (void *)buf;
- } else {
- cmd = (void *)buf;
- }
-
- sg_init_one(&out_sg, cmd, cmd_size);
- if (extra)
- sg_init_one(&extra_sg, extra, extra_size);
- /* allow stack for small buffers */
- if (bounce_out)
- sg_init_one(&in_sg, buf->data, out_size);
- else if (out)
- sg_init_one(&in_sg, out, out_size);
-
- /* add to internal virtio queue */
- ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
- extra ? 2 : 1,
- out ? 1 : 0,
- cmd, GFP_ATOMIC);
- if (ret) {
- um_pci_free_buf(dev, buf);
- return ret;
- }
-
- if (posted) {
- virtqueue_kick(dev->cmd_vq);
- return 0;
- }
-
- /* kick and poll for getting a response on the queue */
- set_bit(UM_PCI_STAT_WAITING, &dev->status);
- virtqueue_kick(dev->cmd_vq);
- ret = 0;
-
- while (1) {
- void *completed = virtqueue_get_buf(dev->cmd_vq, &len);
-
- if (completed == buf)
- break;
-
- if (completed)
- um_pci_free_buf(dev, completed);
-
- if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
- ++delay_count > um_pci_max_delay_us,
- "um virt-pci delay: %d", delay_count)) {
- ret = -EIO;
- break;
- }
- udelay(1);
- }
- clear_bit(UM_PCI_STAT_WAITING, &dev->status);
-
- if (bounce_out)
- memcpy(out, buf->data, out_size);
-
- um_pci_free_buf(dev, buf);
-
- return ret;
-}
-
static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
int size)
{
struct um_pci_device_reg *reg = priv;
struct um_pci_device *dev = reg->dev;
- struct virtio_pcidev_msg hdr = {
- .op = VIRTIO_PCIDEV_OP_CFG_READ,
- .size = size,
- .addr = offset,
- };
- /* max 8, we might not use it all */
- u8 data[8];
if (!dev)
return ULONG_MAX;
- memset(data, 0xff, sizeof(data));
-
switch (size) {
case 1:
case 2:
@@ -251,23 +54,7 @@ static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
return ULONG_MAX;
}
- if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, size))
- return ULONG_MAX;
-
- switch (size) {
- case 1:
- return data[0];
- case 2:
- return le16_to_cpup((void *)data);
- case 4:
- return le32_to_cpup((void *)data);
-#ifdef CONFIG_64BIT
- case 8:
- return le64_to_cpup((void *)data);
-#endif
- default:
- return ULONG_MAX;
- }
+ return dev->ops->cfgspace_read(dev, offset, size);
}
static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
@@ -275,42 +62,24 @@ static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
{
struct um_pci_device_reg *reg = priv;
struct um_pci_device *dev = reg->dev;
- struct {
- struct virtio_pcidev_msg hdr;
- /* maximum size - we may only use parts of it */
- u8 data[8];
- } msg = {
- .hdr = {
- .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
- .size = size,
- .addr = offset,
- },
- };
if (!dev)
return;
switch (size) {
case 1:
- msg.data[0] = (u8)val;
- break;
case 2:
- put_unaligned_le16(val, (void *)msg.data);
- break;
case 4:
- put_unaligned_le32(val, (void *)msg.data);
- break;
#ifdef CONFIG_64BIT
case 8:
- put_unaligned_le64(val, (void *)msg.data);
- break;
#endif
+ break;
default:
WARN(1, "invalid config space write size %d\n", size);
return;
}
- WARN_ON(um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));
+ dev->ops->cfgspace_write(dev, offset, size, val);
}
static const struct logic_iomem_ops um_pci_device_cfgspace_ops = {
@@ -318,30 +87,14 @@ static const struct logic_iomem_ops um_pci_device_cfgspace_ops = {
.write = um_pci_cfgspace_write,
};
-static void um_pci_bar_copy_from(void *priv, void *buffer,
- unsigned int offset, int size)
+static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
+ int size)
{
u8 *resptr = priv;
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
- struct virtio_pcidev_msg hdr = {
- .op = VIRTIO_PCIDEV_OP_MMIO_READ,
- .bar = *resptr,
- .size = size,
- .addr = offset,
- };
-
- memset(buffer, 0xff, size);
-
- um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);
-}
-
-static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
- int size)
-{
- /* 8 is maximum size - we may only use parts of it */
- u8 data[8];
+ u8 bar = *resptr;
switch (size) {
case 1:
@@ -352,72 +105,60 @@ static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
#endif
break;
default:
- WARN(1, "invalid config space read size %d\n", size);
+ WARN(1, "invalid bar read size %d\n", size);
return ULONG_MAX;
}
- um_pci_bar_copy_from(priv, data, offset, size);
+ return dev->ops->bar_read(dev, bar, offset, size);
+}
+
+static void um_pci_bar_write(void *priv, unsigned int offset, int size,
+ unsigned long val)
+{
+ u8 *resptr = priv;
+ struct um_pci_device *dev = container_of(resptr - *resptr,
+ struct um_pci_device,
+ resptr[0]);
+ u8 bar = *resptr;
switch (size) {
case 1:
- return data[0];
case 2:
- return le16_to_cpup((void *)data);
case 4:
- return le32_to_cpup((void *)data);
#ifdef CONFIG_64BIT
case 8:
- return le64_to_cpup((void *)data);
#endif
+ break;
default:
- return ULONG_MAX;
+ WARN(1, "invalid bar write size %d\n", size);
+ return;
}
+
+ dev->ops->bar_write(dev, bar, offset, size, val);
}
-static void um_pci_bar_copy_to(void *priv, unsigned int offset,
- const void *buffer, int size)
+static void um_pci_bar_copy_from(void *priv, void *buffer,
+ unsigned int offset, int size)
{
u8 *resptr = priv;
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
- struct virtio_pcidev_msg hdr = {
- .op = VIRTIO_PCIDEV_OP_MMIO_WRITE,
- .bar = *resptr,
- .size = size,
- .addr = offset,
- };
+ u8 bar = *resptr;
- um_pci_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);
+ dev->ops->bar_copy_from(dev, bar, buffer, offset, size);
}
-static void um_pci_bar_write(void *priv, unsigned int offset, int size,
- unsigned long val)
+static void um_pci_bar_copy_to(void *priv, unsigned int offset,
+ const void *buffer, int size)
{
- /* maximum size - we may only use parts of it */
- u8 data[8];
-
- switch (size) {
- case 1:
- data[0] = (u8)val;
- break;
- case 2:
- put_unaligned_le16(val, (void *)data);
- break;
- case 4:
- put_unaligned_le32(val, (void *)data);
- break;
-#ifdef CONFIG_64BIT
- case 8:
- put_unaligned_le64(val, (void *)data);
- break;
-#endif
- default:
- WARN(1, "invalid config space write size %d\n", size);
- return;
- }
+ u8 *resptr = priv;
+ struct um_pci_device *dev = container_of(resptr - *resptr,
+ struct um_pci_device,
+ resptr[0]);
+ u8 bar = *resptr;
- um_pci_bar_copy_to(priv, offset, data, size);
+ dev->ops->bar_copy_to(dev, bar, offset, buffer, size);
}
static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size)
@@ -426,20 +167,9 @@ static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size)
struct um_pci_device *dev = container_of(resptr - *resptr,
struct um_pci_device,
resptr[0]);
- struct {
- struct virtio_pcidev_msg hdr;
- u8 data;
- } msg = {
- .hdr = {
- .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
- .bar = *resptr,
- .size = size,
- .addr = offset,
- },
- .data = value,
- };
+ u8 bar = *resptr;
- um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);
+ dev->ops->bar_set(dev, bar, offset, value, size);
}
static const struct logic_iomem_ops um_pci_device_bar_ops = {
@@ -486,76 +216,6 @@ static void um_pci_rescan(void)
pci_unlock_rescan_remove();
}
-static void um_pci_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
-{
- struct scatterlist sg[1];
-
- sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
- if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
- kfree(buf);
- else if (kick)
- virtqueue_kick(vq);
-}
-
-static void um_pci_handle_irq_message(struct virtqueue *vq,
- struct virtio_pcidev_msg *msg)
-{
- struct virtio_device *vdev = vq->vdev;
- struct um_pci_device *dev = vdev->priv;
-
- if (!dev->irq)
- return;
-
- /* we should properly chain interrupts, but on ARCH=um we don't care */
-
- switch (msg->op) {
- case VIRTIO_PCIDEV_OP_INT:
- generic_handle_irq(dev->irq);
- break;
- case VIRTIO_PCIDEV_OP_MSI:
- /* our MSI message is just the interrupt number */
- if (msg->size == sizeof(u32))
- generic_handle_irq(le32_to_cpup((void *)msg->data));
- else
- generic_handle_irq(le16_to_cpup((void *)msg->data));
- break;
- case VIRTIO_PCIDEV_OP_PME:
- /* nothing to do - we already woke up due to the message */
- break;
- default:
- dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
- break;
- }
-}
-
-static void um_pci_cmd_vq_cb(struct virtqueue *vq)
-{
- struct virtio_device *vdev = vq->vdev;
- struct um_pci_device *dev = vdev->priv;
- void *cmd;
- int len;
-
- if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
- return;
-
- while ((cmd = virtqueue_get_buf(vq, &len)))
- um_pci_free_buf(dev, cmd);
-}
-
-static void um_pci_irq_vq_cb(struct virtqueue *vq)
-{
- struct virtio_pcidev_msg *msg;
- int len;
-
- while ((msg = virtqueue_get_buf(vq, &len))) {
- if (len >= sizeof(*msg))
- um_pci_handle_irq_message(vq, msg);
-
- /* recycle the message buffer */
- um_pci_irq_vq_addbuf(vq, msg, true);
- }
-}
-
#ifdef CONFIG_OF
/* Copied from arch/x86/kernel/devicetree.c */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
@@ -577,200 +237,6 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
}
#endif
-static int um_pci_init_vqs(struct um_pci_device *dev)
-{
- struct virtqueue_info vqs_info[] = {
- { "cmd", um_pci_cmd_vq_cb },
- { "irq", um_pci_irq_vq_cb },
- };
- struct virtqueue *vqs[2];
- int err, i;
-
- err = virtio_find_vqs(dev->vdev, 2, vqs, vqs_info, NULL);
- if (err)
- return err;
-
- dev->cmd_vq = vqs[0];
- dev->irq_vq = vqs[1];
-
- virtio_device_ready(dev->vdev);
-
- for (i = 0; i < NUM_IRQ_MSGS; i++) {
- void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
-
- if (msg)
- um_pci_irq_vq_addbuf(dev->irq_vq, msg, false);
- }
-
- virtqueue_kick(dev->irq_vq);
-
- return 0;
-}
-
-static void __um_pci_virtio_platform_remove(struct virtio_device *vdev,
- struct um_pci_device *dev)
-{
- virtio_reset_device(vdev);
- vdev->config->del_vqs(vdev);
-
- mutex_lock(&um_pci_mtx);
- um_pci_platform_device = NULL;
- mutex_unlock(&um_pci_mtx);
-
- kfree(dev);
-}
-
-static int um_pci_virtio_platform_probe(struct virtio_device *vdev,
- struct um_pci_device *dev)
-{
- int ret;
-
- dev->platform = true;
-
- mutex_lock(&um_pci_mtx);
-
- if (um_pci_platform_device) {
- mutex_unlock(&um_pci_mtx);
- ret = -EBUSY;
- goto out_free;
- }
-
- ret = um_pci_init_vqs(dev);
- if (ret) {
- mutex_unlock(&um_pci_mtx);
- goto out_free;
- }
-
- um_pci_platform_device = dev;
-
- mutex_unlock(&um_pci_mtx);
-
- ret = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev);
- if (ret)
- __um_pci_virtio_platform_remove(vdev, dev);
-
- return ret;
-
-out_free:
- kfree(dev);
- return ret;
-}
-
-static int um_pci_virtio_probe(struct virtio_device *vdev)
-{
- struct um_pci_device *dev;
- int i, free = -1;
- int err = -ENOSPC;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- dev->vdev = vdev;
- vdev->priv = dev;
-
- if (of_device_is_compatible(vdev->dev.of_node, "simple-bus"))
- return um_pci_virtio_platform_probe(vdev, dev);
-
- mutex_lock(&um_pci_mtx);
- for (i = 0; i < MAX_DEVICES; i++) {
- if (um_pci_devices[i].dev)
- continue;
- free = i;
- break;
- }
-
- if (free < 0)
- goto error;
-
- err = um_pci_init_vqs(dev);
- if (err)
- goto error;
-
- dev->irq = irq_alloc_desc(numa_node_id());
- if (dev->irq < 0) {
- err = dev->irq;
- goto err_reset;
- }
- um_pci_devices[free].dev = dev;
- vdev->priv = dev;
-
- mutex_unlock(&um_pci_mtx);
-
- device_set_wakeup_enable(&vdev->dev, true);
-
- /*
- * In order to do suspend-resume properly, don't allow VQs
- * to be suspended.
- */
- virtio_uml_set_no_vq_suspend(vdev, true);
-
- um_pci_rescan();
- return 0;
-err_reset:
- virtio_reset_device(vdev);
- vdev->config->del_vqs(vdev);
-error:
- mutex_unlock(&um_pci_mtx);
- kfree(dev);
- return err;
-}
-
-static void um_pci_virtio_remove(struct virtio_device *vdev)
-{
- struct um_pci_device *dev = vdev->priv;
- int i;
-
- if (dev->platform) {
- of_platform_depopulate(&vdev->dev);
- __um_pci_virtio_platform_remove(vdev, dev);
- return;
- }
-
- device_set_wakeup_enable(&vdev->dev, false);
-
- mutex_lock(&um_pci_mtx);
- for (i = 0; i < MAX_DEVICES; i++) {
- if (um_pci_devices[i].dev != dev)
- continue;
-
- um_pci_devices[i].dev = NULL;
- irq_free_desc(dev->irq);
-
- break;
- }
- mutex_unlock(&um_pci_mtx);
-
- if (i < MAX_DEVICES) {
- struct pci_dev *pci_dev;
-
- pci_dev = pci_get_slot(bridge->bus, i);
- if (pci_dev)
- pci_stop_and_remove_bus_device_locked(pci_dev);
- }
-
- /* Stop all virtqueues */
- virtio_reset_device(vdev);
- dev->cmd_vq = NULL;
- dev->irq_vq = NULL;
- vdev->config->del_vqs(vdev);
-
- kfree(dev);
-}
-
-static struct virtio_device_id id_table[] = {
- { CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
- { 0 },
-};
-MODULE_DEVICE_TABLE(virtio, id_table);
-
-static struct virtio_driver um_pci_virtio_driver = {
- .driver.name = "virtio-pci",
- .id_table = id_table,
- .probe = um_pci_virtio_probe,
- .remove = um_pci_virtio_remove,
-};
-
static struct resource virt_cfgspace_resource = {
.name = "PCI config space",
.start = 0xf0000000 - MAX_DEVICES * CFG_SPACE_SIZE,
@@ -889,7 +355,7 @@ static void um_pci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
}
static struct irq_chip um_pci_msi_bottom_irq_chip = {
- .name = "UM virtio MSI",
+ .name = "UM virtual MSI",
.irq_compose_msi_msg = um_pci_compose_msi_msg,
};
@@ -939,7 +405,7 @@ static const struct irq_domain_ops um_pci_inner_domain_ops = {
};
static struct irq_chip um_pci_msi_irq_chip = {
- .name = "UM virtio PCIe MSI",
+ .name = "UM virtual PCIe MSI",
.irq_mask = pci_msi_mask_irq,
.irq_unmask = pci_msi_unmask_irq,
};
@@ -998,6 +464,78 @@ static struct resource virt_platform_resource = {
.flags = IORESOURCE_MEM,
};
+int um_pci_device_register(struct um_pci_device *dev)
+{
+ int i, free = -1;
+ int err = 0;
+
+ mutex_lock(&um_pci_mtx);
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (um_pci_devices[i].dev)
+ continue;
+ free = i;
+ break;
+ }
+
+ if (free < 0) {
+ err = -ENOSPC;
+ goto out;
+ }
+
+ dev->irq = irq_alloc_desc(numa_node_id());
+ if (dev->irq < 0) {
+ err = dev->irq;
+ goto out;
+ }
+
+ um_pci_devices[free].dev = dev;
+
+out:
+ mutex_unlock(&um_pci_mtx);
+ if (!err)
+ um_pci_rescan();
+ return err;
+}
+
+void um_pci_device_unregister(struct um_pci_device *dev)
+{
+ int i;
+
+ mutex_lock(&um_pci_mtx);
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (um_pci_devices[i].dev != dev)
+ continue;
+ um_pci_devices[i].dev = NULL;
+ irq_free_desc(dev->irq);
+ break;
+ }
+ mutex_unlock(&um_pci_mtx);
+
+ if (i < MAX_DEVICES) {
+ struct pci_dev *pci_dev;
+
+ pci_dev = pci_get_slot(bridge->bus, i);
+ if (pci_dev)
+ pci_stop_and_remove_bus_device_locked(pci_dev);
+ }
+}
+
+int um_pci_platform_device_register(struct um_pci_device *dev)
+{
+ guard(mutex)(&um_pci_mtx);
+ if (um_pci_platform_device)
+ return -EBUSY;
+ um_pci_platform_device = dev;
+ return 0;
+}
+
+void um_pci_platform_device_unregister(struct um_pci_device *dev)
+{
+ guard(mutex)(&um_pci_mtx);
+ if (um_pci_platform_device == dev)
+ um_pci_platform_device = NULL;
+}
+
static int __init um_pci_init(void)
{
struct irq_domain_info inner_domain_info = {
@@ -1014,10 +552,6 @@ static int __init um_pci_init(void)
WARN_ON(logic_iomem_add_region(&virt_platform_resource,
&um_pci_platform_ops));
- if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
- "No virtio device ID configured for PCI - no PCI support\n"))
- return 0;
-
bridge = pci_alloc_host_bridge(0);
if (!bridge) {
err = -ENOMEM;
@@ -1065,10 +599,8 @@ static int __init um_pci_init(void)
if (err)
goto free;
- err = register_virtio_driver(&um_pci_virtio_driver);
- if (err)
- goto free;
return 0;
+
free:
if (!IS_ERR_OR_NULL(um_pci_inner_domain))
irq_domain_remove(um_pci_inner_domain);
@@ -1080,11 +612,10 @@ free:
}
return err;
}
-module_init(um_pci_init);
+device_initcall(um_pci_init);
static void __exit um_pci_exit(void)
{
- unregister_virtio_driver(&um_pci_virtio_driver);
irq_domain_remove(um_pci_msi_domain);
irq_domain_remove(um_pci_inner_domain);
pci_free_resource_list(&bridge->windows);
diff --git a/arch/um/drivers/virt-pci.h b/arch/um/drivers/virt-pci.h
new file mode 100644
index 000000000000..b20d1475d1eb
--- /dev/null
+++ b/arch/um/drivers/virt-pci.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __UM_VIRT_PCI_H
+#define __UM_VIRT_PCI_H
+
+#include <linux/pci.h>
+
+struct um_pci_device {
+ const struct um_pci_ops *ops;
+
+ /* for now just standard BARs */
+ u8 resptr[PCI_STD_NUM_BARS];
+
+ int irq;
+};
+
+struct um_pci_ops {
+ unsigned long (*cfgspace_read)(struct um_pci_device *dev,
+ unsigned int offset, int size);
+ void (*cfgspace_write)(struct um_pci_device *dev, unsigned int offset,
+ int size, unsigned long val);
+
+ unsigned long (*bar_read)(struct um_pci_device *dev, int bar,
+ unsigned int offset, int size);
+ void (*bar_write)(struct um_pci_device *dev, int bar,
+ unsigned int offset, int size, unsigned long val);
+
+ void (*bar_copy_from)(struct um_pci_device *dev, int bar, void *buffer,
+ unsigned int offset, int size);
+ void (*bar_copy_to)(struct um_pci_device *dev, int bar,
+ unsigned int offset, const void *buffer, int size);
+ void (*bar_set)(struct um_pci_device *dev, int bar,
+ unsigned int offset, u8 value, int size);
+};
+
+int um_pci_device_register(struct um_pci_device *dev);
+void um_pci_device_unregister(struct um_pci_device *dev);
+
+int um_pci_platform_device_register(struct um_pci_device *dev);
+void um_pci_platform_device_unregister(struct um_pci_device *dev);
+
+#endif /* __UM_VIRT_PCI_H */
diff --git a/arch/um/drivers/virtio_pcidev.c b/arch/um/drivers/virtio_pcidev.c
new file mode 100644
index 000000000000..3c4c4c928fdd
--- /dev/null
+++ b/arch/um/drivers/virtio_pcidev.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/logic_iomem.h>
+#include <linux/of_platform.h>
+#include <linux/irqdomain.h>
+#include <linux/virtio_pcidev.h>
+#include <linux/virtio-uml.h>
+#include <linux/delay.h>
+#include <linux/msi.h>
+#include <linux/unaligned.h>
+#include <irq_kern.h>
+
+#include "virt-pci.h"
+
+#define to_virtio_pcidev(_pdev) \
+ container_of(_pdev, struct virtio_pcidev_device, pdev)
+
+/* for MSI-X we have a 32-bit payload */
+#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
+#define NUM_IRQ_MSGS 10
+
+struct virtio_pcidev_message_buffer {
+ struct virtio_pcidev_msg hdr;
+ u8 data[8];
+};
+
+struct virtio_pcidev_device {
+ struct um_pci_device pdev;
+ struct virtio_device *vdev;
+
+ struct virtqueue *cmd_vq, *irq_vq;
+
+#define VIRTIO_PCIDEV_WRITE_BUFS 20
+ struct virtio_pcidev_message_buffer bufs[VIRTIO_PCIDEV_WRITE_BUFS + 1];
+ void *extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS + 1];
+ DECLARE_BITMAP(used_bufs, VIRTIO_PCIDEV_WRITE_BUFS);
+
+#define UM_PCI_STAT_WAITING 0
+ unsigned long status;
+
+ bool platform;
+};
+
+static unsigned int virtio_pcidev_max_delay_us = 40000;
+module_param_named(max_delay_us, virtio_pcidev_max_delay_us, uint, 0644);
+
+static int virtio_pcidev_get_buf(struct virtio_pcidev_device *dev, bool *posted)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_PCIDEV_WRITE_BUFS; i++) {
+ if (!test_and_set_bit(i, dev->used_bufs))
+ return i;
+ }
+
+ *posted = false;
+ return VIRTIO_PCIDEV_WRITE_BUFS;
+}
+
+static void virtio_pcidev_free_buf(struct virtio_pcidev_device *dev, void *buf)
+{
+ int i;
+
+ if (buf == &dev->bufs[VIRTIO_PCIDEV_WRITE_BUFS]) {
+ kfree(dev->extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS]);
+ dev->extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS] = NULL;
+ return;
+ }
+
+ for (i = 0; i < VIRTIO_PCIDEV_WRITE_BUFS; i++) {
+ if (buf == &dev->bufs[i]) {
+ kfree(dev->extra_ptrs[i]);
+ dev->extra_ptrs[i] = NULL;
+ WARN_ON(!test_and_clear_bit(i, dev->used_bufs));
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+static int virtio_pcidev_send_cmd(struct virtio_pcidev_device *dev,
+ struct virtio_pcidev_msg *cmd,
+ unsigned int cmd_size,
+ const void *extra, unsigned int extra_size,
+ void *out, unsigned int out_size)
+{
+ struct scatterlist out_sg, extra_sg, in_sg;
+ struct scatterlist *sgs_list[] = {
+ [0] = &out_sg,
+ [1] = extra ? &extra_sg : &in_sg,
+ [2] = extra ? &in_sg : NULL,
+ };
+ struct virtio_pcidev_message_buffer *buf;
+ int delay_count = 0;
+ bool bounce_out;
+ int ret, len;
+ int buf_idx;
+ bool posted;
+
+ if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf)))
+ return -EINVAL;
+
+ switch (cmd->op) {
+ case VIRTIO_PCIDEV_OP_CFG_WRITE:
+ case VIRTIO_PCIDEV_OP_MMIO_WRITE:
+ case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
+ /* in PCI, writes are posted, so don't wait */
+ posted = !out;
+ WARN_ON(!posted);
+ break;
+ default:
+ posted = false;
+ break;
+ }
+
+ bounce_out = !posted && cmd_size <= sizeof(*cmd) &&
+ out && out_size <= sizeof(buf->data);
+
+ buf_idx = virtio_pcidev_get_buf(dev, &posted);
+ buf = &dev->bufs[buf_idx];
+ memcpy(buf, cmd, cmd_size);
+
+ if (posted && extra && extra_size > sizeof(buf) - cmd_size) {
+ dev->extra_ptrs[buf_idx] = kmemdup(extra, extra_size,
+ GFP_ATOMIC);
+
+ if (!dev->extra_ptrs[buf_idx]) {
+ virtio_pcidev_free_buf(dev, buf);
+ return -ENOMEM;
+ }
+ extra = dev->extra_ptrs[buf_idx];
+ } else if (extra && extra_size <= sizeof(buf) - cmd_size) {
+ memcpy((u8 *)buf + cmd_size, extra, extra_size);
+ cmd_size += extra_size;
+ extra_size = 0;
+ extra = NULL;
+ cmd = (void *)buf;
+ } else {
+ cmd = (void *)buf;
+ }
+
+ sg_init_one(&out_sg, cmd, cmd_size);
+ if (extra)
+ sg_init_one(&extra_sg, extra, extra_size);
+ /* allow stack for small buffers */
+ if (bounce_out)
+ sg_init_one(&in_sg, buf->data, out_size);
+ else if (out)
+ sg_init_one(&in_sg, out, out_size);
+
+ /* add to internal virtio queue */
+ ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
+ extra ? 2 : 1,
+ out ? 1 : 0,
+ cmd, GFP_ATOMIC);
+ if (ret) {
+ virtio_pcidev_free_buf(dev, buf);
+ return ret;
+ }
+
+ if (posted) {
+ virtqueue_kick(dev->cmd_vq);
+ return 0;
+ }
+
+ /* kick and poll for getting a response on the queue */
+ set_bit(UM_PCI_STAT_WAITING, &dev->status);
+ virtqueue_kick(dev->cmd_vq);
+ ret = 0;
+
+ while (1) {
+ void *completed = virtqueue_get_buf(dev->cmd_vq, &len);
+
+ if (completed == buf)
+ break;
+
+ if (completed)
+ virtio_pcidev_free_buf(dev, completed);
+
+ if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
+ ++delay_count > virtio_pcidev_max_delay_us,
+ "um virt-pci delay: %d", delay_count)) {
+ ret = -EIO;
+ break;
+ }
+ udelay(1);
+ }
+ clear_bit(UM_PCI_STAT_WAITING, &dev->status);
+
+ if (bounce_out)
+ memcpy(out, buf->data, out_size);
+
+ virtio_pcidev_free_buf(dev, buf);
+
+ return ret;
+}
+
+static unsigned long virtio_pcidev_cfgspace_read(struct um_pci_device *pdev,
+ unsigned int offset, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_READ,
+ .size = size,
+ .addr = offset,
+ };
+ /* max 8, we might not use it all */
+ u8 data[8];
+
+ memset(data, 0xff, sizeof(data));
+
+ /* size has been checked in um_pci_cfgspace_read() */
+ if (virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, size))
+ return ULONG_MAX;
+
+ switch (size) {
+ case 1:
+ return data[0];
+ case 2:
+ return le16_to_cpup((void *)data);
+ case 4:
+ return le32_to_cpup((void *)data);
+#ifdef CONFIG_64BIT
+ case 8:
+ return le64_to_cpup((void *)data);
+#endif
+ default:
+ return ULONG_MAX;
+ }
+}
+
+static void virtio_pcidev_cfgspace_write(struct um_pci_device *pdev,
+ unsigned int offset, int size,
+ unsigned long val)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct {
+ struct virtio_pcidev_msg hdr;
+ /* maximum size - we may only use parts of it */
+ u8 data[8];
+ } msg = {
+ .hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
+ .size = size,
+ .addr = offset,
+ },
+ };
+
+ /* size has been checked in um_pci_cfgspace_write() */
+ switch (size) {
+ case 1:
+ msg.data[0] = (u8)val;
+ break;
+ case 2:
+ put_unaligned_le16(val, (void *)msg.data);
+ break;
+ case 4:
+ put_unaligned_le32(val, (void *)msg.data);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ put_unaligned_le64(val, (void *)msg.data);
+ break;
+#endif
+ }
+
+ WARN_ON(virtio_pcidev_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));
+}
+
+static void virtio_pcidev_bar_copy_from(struct um_pci_device *pdev,
+ int bar, void *buffer,
+ unsigned int offset, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_MMIO_READ,
+ .bar = bar,
+ .size = size,
+ .addr = offset,
+ };
+
+ memset(buffer, 0xff, size);
+
+ virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);
+}
+
+static unsigned long virtio_pcidev_bar_read(struct um_pci_device *pdev, int bar,
+ unsigned int offset, int size)
+{
+ /* 8 is maximum size - we may only use parts of it */
+ u8 data[8];
+
+ /* size has been checked in um_pci_bar_read() */
+ virtio_pcidev_bar_copy_from(pdev, bar, data, offset, size);
+
+ switch (size) {
+ case 1:
+ return data[0];
+ case 2:
+ return le16_to_cpup((void *)data);
+ case 4:
+ return le32_to_cpup((void *)data);
+#ifdef CONFIG_64BIT
+ case 8:
+ return le64_to_cpup((void *)data);
+#endif
+ default:
+ return ULONG_MAX;
+ }
+}
+
+static void virtio_pcidev_bar_copy_to(struct um_pci_device *pdev,
+ int bar, unsigned int offset,
+ const void *buffer, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct virtio_pcidev_msg hdr = {
+ .op = VIRTIO_PCIDEV_OP_MMIO_WRITE,
+ .bar = bar,
+ .size = size,
+ .addr = offset,
+ };
+
+ virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);
+}
+
+static void virtio_pcidev_bar_write(struct um_pci_device *pdev, int bar,
+ unsigned int offset, int size,
+ unsigned long val)
+{
+ /* maximum size - we may only use parts of it */
+ u8 data[8];
+
+ /* size has been checked in um_pci_bar_write() */
+ switch (size) {
+ case 1:
+ data[0] = (u8)val;
+ break;
+ case 2:
+ put_unaligned_le16(val, (void *)data);
+ break;
+ case 4:
+ put_unaligned_le32(val, (void *)data);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ put_unaligned_le64(val, (void *)data);
+ break;
+#endif
+ }
+
+ virtio_pcidev_bar_copy_to(pdev, bar, offset, data, size);
+}
+
+static void virtio_pcidev_bar_set(struct um_pci_device *pdev, int bar,
+ unsigned int offset, u8 value, int size)
+{
+ struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev);
+ struct {
+ struct virtio_pcidev_msg hdr;
+ u8 data;
+ } msg = {
+ .hdr = {
+ .op = VIRTIO_PCIDEV_OP_CFG_WRITE,
+ .bar = bar,
+ .size = size,
+ .addr = offset,
+ },
+ .data = value,
+ };
+
+ virtio_pcidev_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);
+}
+
+static const struct um_pci_ops virtio_pcidev_um_pci_ops = {
+ .cfgspace_read = virtio_pcidev_cfgspace_read,
+ .cfgspace_write = virtio_pcidev_cfgspace_write,
+ .bar_read = virtio_pcidev_bar_read,
+ .bar_write = virtio_pcidev_bar_write,
+ .bar_copy_from = virtio_pcidev_bar_copy_from,
+ .bar_copy_to = virtio_pcidev_bar_copy_to,
+ .bar_set = virtio_pcidev_bar_set,
+};
+
+static void virtio_pcidev_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
+{
+ struct scatterlist sg[1];
+
+ sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
+ if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
+ kfree(buf);
+ else if (kick)
+ virtqueue_kick(vq);
+}
+
+static void virtio_pcidev_handle_irq_message(struct virtqueue *vq,
+ struct virtio_pcidev_msg *msg)
+{
+ struct virtio_device *vdev = vq->vdev;
+ struct virtio_pcidev_device *dev = vdev->priv;
+
+ if (!dev->pdev.irq)
+ return;
+
+ /* we should properly chain interrupts, but on ARCH=um we don't care */
+
+ switch (msg->op) {
+ case VIRTIO_PCIDEV_OP_INT:
+ generic_handle_irq(dev->pdev.irq);
+ break;
+ case VIRTIO_PCIDEV_OP_MSI:
+ /* our MSI message is just the interrupt number */
+ if (msg->size == sizeof(u32))
+ generic_handle_irq(le32_to_cpup((void *)msg->data));
+ else
+ generic_handle_irq(le16_to_cpup((void *)msg->data));
+ break;
+ case VIRTIO_PCIDEV_OP_PME:
+ /* nothing to do - we already woke up due to the message */
+ break;
+ default:
+ dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
+ break;
+ }
+}
+
+static void virtio_pcidev_cmd_vq_cb(struct virtqueue *vq)
+{
+ struct virtio_device *vdev = vq->vdev;
+ struct virtio_pcidev_device *dev = vdev->priv;
+ void *cmd;
+ int len;
+
+ if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
+ return;
+
+ while ((cmd = virtqueue_get_buf(vq, &len)))
+ virtio_pcidev_free_buf(dev, cmd);
+}
+
+static void virtio_pcidev_irq_vq_cb(struct virtqueue *vq)
+{
+ struct virtio_pcidev_msg *msg;
+ int len;
+
+ while ((msg = virtqueue_get_buf(vq, &len))) {
+ if (len >= sizeof(*msg))
+ virtio_pcidev_handle_irq_message(vq, msg);
+
+ /* recycle the message buffer */
+ virtio_pcidev_irq_vq_addbuf(vq, msg, true);
+ }
+}
+
+static int virtio_pcidev_init_vqs(struct virtio_pcidev_device *dev)
+{
+ struct virtqueue_info vqs_info[] = {
+ { "cmd", virtio_pcidev_cmd_vq_cb },
+ { "irq", virtio_pcidev_irq_vq_cb },
+ };
+ struct virtqueue *vqs[2];
+ int err, i;
+
+ err = virtio_find_vqs(dev->vdev, 2, vqs, vqs_info, NULL);
+ if (err)
+ return err;
+
+ dev->cmd_vq = vqs[0];
+ dev->irq_vq = vqs[1];
+
+ virtio_device_ready(dev->vdev);
+
+ for (i = 0; i < NUM_IRQ_MSGS; i++) {
+ void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);
+
+ if (msg)
+ virtio_pcidev_irq_vq_addbuf(dev->irq_vq, msg, false);
+ }
+
+ virtqueue_kick(dev->irq_vq);
+
+ return 0;
+}
+
+static void __virtio_pcidev_virtio_platform_remove(struct virtio_device *vdev,
+ struct virtio_pcidev_device *dev)
+{
+ um_pci_platform_device_unregister(&dev->pdev);
+
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+
+ kfree(dev);
+}
+
+static int virtio_pcidev_virtio_platform_probe(struct virtio_device *vdev,
+ struct virtio_pcidev_device *dev)
+{
+ int err;
+
+ dev->platform = true;
+
+ err = virtio_pcidev_init_vqs(dev);
+ if (err)
+ goto err_free;
+
+ err = um_pci_platform_device_register(&dev->pdev);
+ if (err)
+ goto err_reset;
+
+ err = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev);
+ if (err)
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ um_pci_platform_device_unregister(&dev->pdev);
+err_reset:
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+err_free:
+ kfree(dev);
+ return err;
+}
+
+static int virtio_pcidev_virtio_probe(struct virtio_device *vdev)
+{
+ struct virtio_pcidev_device *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vdev = vdev;
+ vdev->priv = dev;
+
+ dev->pdev.ops = &virtio_pcidev_um_pci_ops;
+
+ if (of_device_is_compatible(vdev->dev.of_node, "simple-bus"))
+ return virtio_pcidev_virtio_platform_probe(vdev, dev);
+
+ err = virtio_pcidev_init_vqs(dev);
+ if (err)
+ goto err_free;
+
+ err = um_pci_device_register(&dev->pdev);
+ if (err)
+ goto err_reset;
+
+ device_set_wakeup_enable(&vdev->dev, true);
+
+ /*
+ * In order to do suspend-resume properly, don't allow VQs
+ * to be suspended.
+ */
+ virtio_uml_set_no_vq_suspend(vdev, true);
+
+ return 0;
+
+err_reset:
+ virtio_reset_device(vdev);
+ vdev->config->del_vqs(vdev);
+err_free:
+ kfree(dev);
+ return err;
+}
+
+static void virtio_pcidev_virtio_remove(struct virtio_device *vdev)
+{
+ struct virtio_pcidev_device *dev = vdev->priv;
+
+ if (dev->platform) {
+ of_platform_depopulate(&vdev->dev);
+ __virtio_pcidev_virtio_platform_remove(vdev, dev);
+ return;
+ }
+
+ device_set_wakeup_enable(&vdev->dev, false);
+
+ um_pci_device_unregister(&dev->pdev);
+
+ /* Stop all virtqueues */
+ virtio_reset_device(vdev);
+ dev->cmd_vq = NULL;
+ dev->irq_vq = NULL;
+ vdev->config->del_vqs(vdev);
+
+ kfree(dev);
+}
+
+static struct virtio_device_id id_table[] = {
+ { CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+static struct virtio_driver virtio_pcidev_virtio_driver = {
+ .driver.name = "virtio-pci",
+ .id_table = id_table,
+ .probe = virtio_pcidev_virtio_probe,
+ .remove = virtio_pcidev_virtio_remove,
+};
+
+static int __init virtio_pcidev_init(void)
+{
+ if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
+ "No virtio device ID configured for PCI - no PCI support\n"))
+ return 0;
+
+ return register_virtio_driver(&virtio_pcidev_virtio_driver);
+}
+late_initcall(virtio_pcidev_init);
+
+static void __exit virtio_pcidev_exit(void)
+{
+ unregister_virtio_driver(&virtio_pcidev_virtio_driver);
+}
+module_exit(virtio_pcidev_exit);
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 428f2c5158c2..04ab3b653a48 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
generic-y += mmiowb.h
+generic-y += module.h
generic-y += module.lds.h
generic-y += param.h
generic-y += parport.h
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index f0af23c3aeb2..826ec44b58cd 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -25,27 +25,18 @@
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
-#define __pte_free_tlb(tlb, pte, address) \
-do { \
- pagetable_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \
-} while (0)
+#define __pte_free_tlb(tlb, pte, address) \
+ tlb_remove_ptdesc((tlb), page_ptdesc(pte))
#if CONFIG_PGTABLE_LEVELS > 2
-#define __pmd_free_tlb(tlb, pmd, address) \
-do { \
- pagetable_dtor(virt_to_ptdesc(pmd)); \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \
-} while (0)
+#define __pmd_free_tlb(tlb, pmd, address) \
+ tlb_remove_ptdesc((tlb), virt_to_ptdesc(pmd))
#if CONFIG_PGTABLE_LEVELS > 3
-#define __pud_free_tlb(tlb, pud, address) \
-do { \
- pagetable_dtor(virt_to_ptdesc(pud)); \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \
-} while (0)
+#define __pud_free_tlb(tlb, pud, address) \
+ tlb_remove_ptdesc((tlb), virt_to_ptdesc(pud))
#endif
#endif
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 5d6356eafffe..8a789c17acd8 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -31,6 +31,8 @@ struct thread_struct {
} thread;
} request;
+ void *segv_continue;
+
/* Contains variable sized FP registers */
struct pt_regs regs;
};
diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h
index 1d4b6bbc1b65..3a08f9029a3f 100644
--- a/arch/um/include/asm/uaccess.h
+++ b/arch/um/include/asm/uaccess.h
@@ -9,6 +9,7 @@
#include <asm/elf.h>
#include <linux/unaligned.h>
+#include <sysdep/faultinfo.h>
#define __under_task_size(addr, size) \
(((unsigned long) (addr) < TASK_SIZE) && \
@@ -44,19 +45,28 @@ static inline int __access_ok(const void __user *ptr, unsigned long size)
__access_ok_vsyscall(addr, size));
}
-/* no pagefaults for kernel addresses in um */
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
- *((type *)dst) = get_unaligned((type *)(src)); \
- if (0) /* make sure the label looks used to the compiler */ \
+ int __faulted; \
+ \
+ ___backtrack_faulted(__faulted); \
+ if (__faulted) { \
+ *((type *)dst) = (type) 0; \
goto err_label; \
+ } \
+ *((type *)dst) = get_unaligned((type *)(src)); \
+ current->thread.segv_continue = NULL; \
} while (0)
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
- put_unaligned(*((type *)src), (type *)(dst)); \
- if (0) /* make sure the label looks used to the compiler */ \
+ int __faulted; \
+ \
+ ___backtrack_faulted(__faulted); \
+ if (__faulted) \
goto err_label; \
+ put_unaligned(*((type *)src), (type *)(dst)); \
+ current->thread.segv_continue = NULL; \
} while (0)
#endif
diff --git a/arch/um/include/linux/time-internal.h b/arch/um/include/linux/time-internal.h
index b22226634ff6..138908b999d7 100644
--- a/arch/um/include/linux/time-internal.h
+++ b/arch/um/include/linux/time-internal.h
@@ -83,6 +83,8 @@ extern void time_travel_not_configured(void);
#define time_travel_del_event(...) time_travel_not_configured()
#endif /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
+extern unsigned long tt_extra_sched_jiffies;
+
/*
* Without CONFIG_UML_TIME_TRAVEL_SUPPORT this is a linker error if used,
* which is intentional since we really shouldn't link it in that case.
diff --git a/arch/um/include/shared/arch.h b/arch/um/include/shared/arch.h
index 880ee42a3329..cc398a21ad96 100644
--- a/arch/um/include/shared/arch.h
+++ b/arch/um/include/shared/arch.h
@@ -12,4 +12,6 @@ extern void arch_check_bugs(void);
extern int arch_fixup(unsigned long address, struct uml_pt_regs *regs);
extern void arch_examine_signal(int sig, struct uml_pt_regs *regs);
+void mc_set_rip(void *_mc, void *target);
+
#endif
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index ea65f151bf48..4f44dcce8a7c 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -50,7 +50,7 @@ extern int linux_main(int argc, char **argv, char **envp);
extern void uml_finishsetup(void);
struct siginfo;
-extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *);
+extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *, void *);
#endif
diff --git a/arch/um/include/shared/irq_user.h b/arch/um/include/shared/irq_user.h
index da0f6eea30d0..88835b52ae2b 100644
--- a/arch/um/include/shared/irq_user.h
+++ b/arch/um/include/shared/irq_user.h
@@ -15,7 +15,8 @@ enum um_irq_type {
};
struct siginfo;
-extern void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
+extern void sigio_handler(int sig, struct siginfo *unused_si,
+ struct uml_pt_regs *regs, void *mc);
void sigio_run_timetravel_handlers(void);
extern void free_irq_by_fd(int fd);
extern void deactivate_fd(int fd, int irqnum);
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index f21dc8517538..00ca3e12fd9a 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -24,10 +24,12 @@ extern void free_stack(unsigned long stack, int order);
struct pt_regs;
extern void do_signal(struct pt_regs *regs);
extern void interrupt_end(void);
-extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs);
+extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs,
+ void *mc);
extern unsigned long segv(struct faultinfo fi, unsigned long ip,
- int is_user, struct uml_pt_regs *regs);
+ int is_user, struct uml_pt_regs *regs,
+ void *mc);
extern int handle_page_fault(unsigned long address, unsigned long ip,
int is_write, int is_user, int *code_out);
@@ -59,8 +61,10 @@ extern unsigned long from_irq_stack(int nested);
extern int singlestepping(void);
-extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
-extern void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
+extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc);
+extern void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc);
extern void fatal_sigsegv(void) __attribute__ ((noreturn));
void um_idle_sleep(void);
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 5babad8c5f75..152a60080d5b 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -213,7 +213,6 @@ extern int os_protect_memory(void *addr, unsigned long len,
extern int os_unmap_memory(void *addr, int len);
extern int os_drop_memory(void *addr, int length);
extern int can_drop_memory(void);
-extern int os_mincore(void *addr, unsigned long len);
void os_set_pdeathsig(void);
@@ -225,6 +224,11 @@ extern int run_helper_thread(int (*proc)(void *), void *arg,
unsigned int flags, unsigned long *stack_out);
extern int helper_wait(int pid);
+struct os_helper_thread;
+int os_run_helper_thread(struct os_helper_thread **td_out,
+ void *(*routine)(void *), void *arg);
+void os_kill_helper_thread(struct os_helper_thread *td);
+void os_fix_helper_thread_signals(void);
/* umid.c */
extern int umid_file_name(char *name, char *buf, int len);
@@ -310,7 +314,7 @@ extern void um_irqs_resume(void);
extern int add_sigio_fd(int fd);
extern int ignore_sigio_fd(int fd);
extern void maybe_sigio_broken(int fd);
-extern void sigio_broken(int fd);
+extern void sigio_broken(void);
/*
* unlocked versions for IRQ controller code.
*
diff --git a/arch/um/include/shared/sigio.h b/arch/um/include/shared/sigio.h
index e60c8b227844..c6c2edce1f6d 100644
--- a/arch/um/include/shared/sigio.h
+++ b/arch/um/include/shared/sigio.h
@@ -6,7 +6,6 @@
#ifndef __SIGIO_H__
#define __SIGIO_H__
-extern int write_sigio_irq(int fd);
extern void sigio_lock(void);
extern void sigio_unlock(void);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index f8567b933ffa..4df1cd0d2017 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -17,7 +17,7 @@ extra-y := vmlinux.lds
obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
physmem.o process.o ptrace.o reboot.o sigio.o \
signal.o sysrq.o time.o tlb.o trap.o \
- um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/
+ um_arch.o umid.o kmsg_dump.o capflags.o skas/
obj-y += load_file.o
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index a4991746f5ea..abe8f30a521c 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -236,7 +236,8 @@ static void _sigio_handler(struct uml_pt_regs *regs,
free_irqs();
}
-void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc)
{
preempt_disable();
_sigio_handler(regs, irqs_suspended);
diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
deleted file mode 100644
index 8ccd56813f68..000000000000
--- a/arch/um/kernel/maccess.c
+++ /dev/null
@@ -1,19 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
- */
-
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
-#include <os.h>
-
-bool copy_from_kernel_nofault_allowed(const void *src, size_t size)
-{
- void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
-
- if ((unsigned long)src < PAGE_SIZE || size <= 0)
- return false;
- if (os_mincore(psrc, size + src - psrc) <= 0)
- return false;
- return true;
-}
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 379f33a1babf..76bec7de81b5 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -9,6 +9,8 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/init.h>
+#include <asm/sections.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <as-layout.h>
@@ -66,6 +68,7 @@ void __init arch_mm_preinit(void)
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
memblock_free((void *)brk_end, uml_reserved - brk_end);
uml_reserved = brk_end;
+ min_low_pfn = PFN_UP(__pa(uml_reserved));
max_pfn = max_low_pfn;
}
@@ -242,3 +245,11 @@ static const pgprot_t protection_map[16] = {
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT
+
+void mark_rodata_ro(void)
+{
+ unsigned long rodata_start = PFN_ALIGN(__start_rodata);
+ unsigned long rodata_end = PFN_ALIGN(__end_rodata);
+
+ os_protect_memory((void *)rodata_start, rodata_end - rodata_start, 1, 0, 0);
+}
diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c
index 5085a50c3b8c..4fc04742048a 100644
--- a/arch/um/kernel/sigio.c
+++ b/arch/um/kernel/sigio.c
@@ -8,32 +8,6 @@
#include <os.h>
#include <sigio.h>
-/* Protected by sigio_lock() called from write_sigio_workaround */
-static int sigio_irq_fd = -1;
-
-static irqreturn_t sigio_interrupt(int irq, void *data)
-{
- char c;
-
- os_read_file(sigio_irq_fd, &c, sizeof(c));
- return IRQ_HANDLED;
-}
-
-int write_sigio_irq(int fd)
-{
- int err;
-
- err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
- 0, "write sigio", NULL);
- if (err < 0) {
- printk(KERN_ERR "write_sigio_irq : um_request_irq failed, "
- "err = %d\n", err);
- return -1;
- }
- sigio_irq_fd = fd;
- return 0;
-}
-
/* These are called from os-Linux/sigio.c to protect its pollfds arrays. */
static DEFINE_MUTEX(sigio_mutex);
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index b09e85279d2b..a5beaea2967e 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -31,6 +31,17 @@ void handle_syscall(struct uml_pt_regs *r)
goto out;
syscall = UPT_SYSCALL_NR(r);
+
+ /*
+ * If no time passes, then sched_yield may not actually yield, causing
+ * broken spinlock implementations in userspace (ASAN) to hang for long
+ * periods of time.
+ */
+ if ((time_travel_mode == TT_MODE_INFCPU ||
+ time_travel_mode == TT_MODE_EXTERNAL) &&
+ syscall == __NR_sched_yield)
+ tt_extra_sched_jiffies += 1;
+
if (syscall >= 0 && syscall < __NR_syscalls) {
unsigned long ret = EXECUTE_SYSCALL(syscall, regs);
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index cdaee3e94273..ce073150dc20 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -16,6 +16,7 @@
#include <kern_util.h>
#include <os.h>
#include <skas.h>
+#include <arch.h>
/*
* Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by
@@ -175,12 +176,14 @@ void fatal_sigsegv(void)
* @sig: the signal number
* @unused_si: the signal info struct; unused in this handler
* @regs: the ptrace register information
+ * @mc: the mcontext of the signal
*
* The handler first extracts the faultinfo from the UML ptrace regs struct.
* If the userfault did not happen in an UML userspace process, bad_segv is called.
* Otherwise the signal did happen in a cloned userspace process, handle it.
*/
-void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc)
{
struct faultinfo * fi = UPT_FAULTINFO(regs);
@@ -189,7 +192,7 @@ void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
bad_segv(*fi, UPT_IP(regs));
return;
}
- segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
+ segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs, mc);
}
/*
@@ -199,7 +202,7 @@ void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
* give us bad data!
*/
unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
- struct uml_pt_regs *regs)
+ struct uml_pt_regs *regs, void *mc)
{
int si_code;
int err;
@@ -223,6 +226,19 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
goto out;
}
else if (current->mm == NULL) {
+ if (current->pagefault_disabled) {
+ if (!mc) {
+ show_regs(container_of(regs, struct pt_regs, regs));
+ panic("Segfault with pagefaults disabled but no mcontext");
+ }
+ if (!current->thread.segv_continue) {
+ show_regs(container_of(regs, struct pt_regs, regs));
+ panic("Segfault without recovery target");
+ }
+ mc_set_rip(mc, current->thread.segv_continue);
+ current->thread.segv_continue = NULL;
+ goto out;
+ }
show_regs(container_of(regs, struct pt_regs, regs));
panic("Segfault with no mm");
}
@@ -274,7 +290,8 @@ out:
return 0;
}
-void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
+void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs,
+ void *mc)
{
int code, err;
if (!UPT_IS_USER(regs)) {
@@ -302,7 +319,8 @@ void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
}
}
-void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs,
+ void *mc)
{
do_IRQ(WINCH_IRQ, regs);
}
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 050279814eac..d4b3b6742ec8 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -12,6 +12,7 @@
#include <linux/panic_notifier.h>
#include <linux/seq_file.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
@@ -78,7 +79,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "model name\t: UML\n");
seq_printf(m, "mode\t\t: skas\n");
seq_printf(m, "host\t\t: %s\n", host_info);
- seq_printf(m, "fpu\t\t: %s\n", cpu_has(&boot_cpu_data, X86_FEATURE_FPU) ? "yes" : "no");
+ seq_printf(m, "fpu\t\t: %s\n", str_yes_no(cpu_has(&boot_cpu_data, X86_FEATURE_FPU)));
seq_printf(m, "flags\t\t:");
for (i = 0; i < 32*NCAPINTS; i++)
if (cpu_has(&boot_cpu_data, i) && (x86_cap_flags[i] != NULL))
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index 3cb8ac63be6e..89c2ad2a4e3a 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -8,6 +8,7 @@
#include <unistd.h>
#include <errno.h>
#include <sched.h>
+#include <pthread.h>
#include <linux/limits.h>
#include <sys/socket.h>
#include <sys/wait.h>
@@ -121,6 +122,10 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
unsigned long stack, sp;
int pid, status, err;
+ /* To share memory space, use os_run_helper_thread() instead. */
+ if (flags & CLONE_VM)
+ return -EINVAL;
+
stack = alloc_stack(0, __uml_cant_sleep());
if (stack == 0)
return -ENOMEM;
@@ -167,3 +172,65 @@ int helper_wait(int pid)
} else
return 0;
}
+
+struct os_helper_thread {
+ pthread_t handle;
+};
+
+int os_run_helper_thread(struct os_helper_thread **td_out,
+ void *(*routine)(void *), void *arg)
+{
+ struct os_helper_thread *td;
+ sigset_t sigset, oset;
+ int err, flags;
+
+ flags = __uml_cant_sleep() ? UM_GFP_ATOMIC : UM_GFP_KERNEL;
+ td = uml_kmalloc(sizeof(*td), flags);
+ if (!td)
+ return -ENOMEM;
+
+ sigfillset(&sigset);
+ if (sigprocmask(SIG_SETMASK, &sigset, &oset) < 0) {
+ err = -errno;
+ kfree(td);
+ return err;
+ }
+
+ err = pthread_create(&td->handle, NULL, routine, arg);
+
+ if (sigprocmask(SIG_SETMASK, &oset, NULL) < 0)
+ panic("Failed to restore the signal mask: %d", errno);
+
+ if (err != 0)
+ kfree(td);
+ else
+ *td_out = td;
+
+ return -err;
+}
+
+void os_kill_helper_thread(struct os_helper_thread *td)
+{
+ pthread_cancel(td->handle);
+ pthread_join(td->handle, NULL);
+ kfree(td);
+}
+
+void os_fix_helper_thread_signals(void)
+{
+ sigset_t sigset;
+
+ sigemptyset(&sigset);
+
+ sigaddset(&sigset, SIGWINCH);
+ sigaddset(&sigset, SIGPIPE);
+ sigaddset(&sigset, SIGPROF);
+ sigaddset(&sigset, SIGINT);
+ sigaddset(&sigset, SIGTERM);
+ sigaddset(&sigset, SIGCHLD);
+ sigaddset(&sigset, SIGALRM);
+ sigaddset(&sigset, SIGIO);
+ sigaddset(&sigset, SIGUSR1);
+
+ pthread_sigmask(SIG_SETMASK, &sigset, NULL);
+}
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index 9f086f939420..184566edeee9 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -142,57 +142,6 @@ out:
return ok;
}
-static int os_page_mincore(void *addr)
-{
- char vec[2];
- int ret;
-
- ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
- if (ret < 0) {
- if (errno == ENOMEM || errno == EINVAL)
- return 0;
- else
- return -errno;
- }
-
- return vec[0] & 1;
-}
-
-int os_mincore(void *addr, unsigned long len)
-{
- char *vec;
- int ret, i;
-
- if (len <= UM_KERN_PAGE_SIZE)
- return os_page_mincore(addr);
-
- vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
- if (!vec)
- return -ENOMEM;
-
- ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
- if (ret < 0) {
- if (errno == ENOMEM || errno == EINVAL)
- ret = 0;
- else
- ret = -errno;
-
- goto out;
- }
-
- for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
- if (!(vec[i] & 1)) {
- ret = 0;
- goto out;
- }
- }
-
- ret = 1;
-out:
- free(vec);
- return ret;
-}
-
void init_new_thread_signals(void)
{
set_handler(SIGSEGV);
diff --git a/arch/um/os-Linux/sigio.c b/arch/um/os-Linux/sigio.c
index 9aac8def4d63..a05a6ecee756 100644
--- a/arch/um/os-Linux/sigio.c
+++ b/arch/um/os-Linux/sigio.c
@@ -11,6 +11,7 @@
#include <sched.h>
#include <signal.h>
#include <string.h>
+#include <sys/epoll.h>
#include <kern_util.h>
#include <init.h>
#include <os.h>
@@ -21,184 +22,51 @@
* Protected by sigio_lock(), also used by sigio_cleanup, which is an
* exitcall.
*/
-static int write_sigio_pid = -1;
-static unsigned long write_sigio_stack;
+static struct os_helper_thread *write_sigio_td;

-/*
- * These arrays are initialized before the sigio thread is started, and
- * the descriptors closed after it is killed. So, it can't see them change.
- * On the UML side, they are changed under the sigio_lock.
- */
-#define SIGIO_FDS_INIT {-1, -1}
-
-static int write_sigio_fds[2] = SIGIO_FDS_INIT;
-static int sigio_private[2] = SIGIO_FDS_INIT;
+static int epollfd = -1;

-struct pollfds {
- struct pollfd *poll;
- int size;
- int used;
-};
+#define MAX_EPOLL_EVENTS 64

-/*
- * Protected by sigio_lock(). Used by the sigio thread, but the UML thread
- * synchronizes with it.
- */
-static struct pollfds current_poll;
-static struct pollfds next_poll;
-static struct pollfds all_sigio_fds;
+static struct epoll_event epoll_events[MAX_EPOLL_EVENTS];

-static int write_sigio_thread(void *unused)
+static void *write_sigio_thread(void *unused)
{
- struct pollfds *fds, tmp;
- struct pollfd *p;
- int i, n, respond_fd;
- char c;
-
- os_set_pdeathsig();
- os_fix_helper_signals();
- fds = &current_poll;
+ int pid = getpid();
+ int r;
+
+ os_fix_helper_thread_signals();
+
while (1) {
- n = poll(fds->poll, fds->used, -1);
- if (n < 0) {
+ r = epoll_wait(epollfd, epoll_events, MAX_EPOLL_EVENTS, -1);
+ if (r < 0) {
if (errno == EINTR)
continue;
- printk(UM_KERN_ERR "write_sigio_thread : poll returned "
- "%d, errno = %d\n", n, errno);
+ printk(UM_KERN_ERR "%s: epoll_wait failed, errno = %d\n",
+ __func__, errno);
}
- for (i = 0; i < fds->used; i++) {
- p = &fds->poll[i];
- if (p->revents == 0)
- continue;
- if (p->fd == sigio_private[1]) {
- CATCH_EINTR(n = read(sigio_private[1], &c,
- sizeof(c)));
- if (n != sizeof(c))
- printk(UM_KERN_ERR
- "write_sigio_thread : "
- "read on socket failed, "
- "err = %d\n", errno);
- tmp = current_poll;
- current_poll = next_poll;
- next_poll = tmp;
- respond_fd = sigio_private[1];
- }
- else {
- respond_fd = write_sigio_fds[1];
- fds->used--;
- memmove(&fds->poll[i], &fds->poll[i + 1],
- (fds->used - i) * sizeof(*fds->poll));
- }
-
- CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
- if (n != sizeof(c))
- printk(UM_KERN_ERR "write_sigio_thread : "
- "write on socket failed, err = %d\n",
- errno);
- }
- }
- return 0;
-}
-
-static int need_poll(struct pollfds *polls, int n)
-{
- struct pollfd *new;
-
- if (n <= polls->size)
- return 0;
-
- new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
- if (new == NULL) {
- printk(UM_KERN_ERR "need_poll : failed to allocate new "
- "pollfds\n");
- return -ENOMEM;
+ CATCH_EINTR(r = tgkill(pid, pid, SIGIO));
+ if (r < 0)
+ printk(UM_KERN_ERR "%s: tgkill failed, errno = %d\n",
+ __func__, errno);
}
- memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
- kfree(polls->poll);
-
- polls->poll = new;
- polls->size = n;
- return 0;
-}
-
-/*
- * Must be called with sigio_lock held, because it's needed by the marked
- * critical section.
- */
-static void update_thread(void)
-{
- unsigned long flags;
- int n;
- char c;
-
- flags = um_set_signals_trace(0);
- CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
- if (n != sizeof(c)) {
- printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
- errno);
- goto fail;
- }
-
- CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
- if (n != sizeof(c)) {
- printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
- errno);
- goto fail;
- }
-
- um_set_signals_trace(flags);
- return;
- fail:
- /* Critical section start */
- if (write_sigio_pid != -1) {
- os_kill_process(write_sigio_pid, 1);
- free_stack(write_sigio_stack, 0);
- }
- write_sigio_pid = -1;
- close(sigio_private[0]);
- close(sigio_private[1]);
- close(write_sigio_fds[0]);
- close(write_sigio_fds[1]);
- /* Critical section end */
- um_set_signals_trace(flags);
+ return NULL;
}
int __add_sigio_fd(int fd)
{
- struct pollfd *p;
- int err, i, n;
-
- for (i = 0; i < all_sigio_fds.used; i++) {
- if (all_sigio_fds.poll[i].fd == fd)
- break;
- }
- if (i == all_sigio_fds.used)
- return -ENOSPC;
-
- p = &all_sigio_fds.poll[i];
-
- for (i = 0; i < current_poll.used; i++) {
- if (current_poll.poll[i].fd == fd)
- return 0;
- }
-
- n = current_poll.used;
- err = need_poll(&next_poll, n + 1);
- if (err)
- return err;
-
- memcpy(next_poll.poll, current_poll.poll,
- current_poll.used * sizeof(struct pollfd));
- next_poll.poll[n] = *p;
- next_poll.used = n + 1;
- update_thread();
-
- return 0;
+ struct epoll_event event = {
+ .data.fd = fd,
+ .events = EPOLLIN | EPOLLET,
+ };
+ int r;
+
+ CATCH_EINTR(r = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &event));
+ return r < 0 ? -errno : 0;
}
-
int add_sigio_fd(int fd)
{
int err;
@@ -212,38 +80,11 @@ int add_sigio_fd(int fd)
int __ignore_sigio_fd(int fd)
{
- struct pollfd *p;
- int err, i, n = 0;
-
- /*
- * This is called from exitcalls elsewhere in UML - if
- * sigio_cleanup has already run, then update_thread will hang
- * or fail because the thread is no longer running.
- */
- if (write_sigio_pid == -1)
- return -EIO;
-
- for (i = 0; i < current_poll.used; i++) {
- if (current_poll.poll[i].fd == fd)
- break;
- }
- if (i == current_poll.used)
- return -ENOENT;
-
- err = need_poll(&next_poll, current_poll.used - 1);
- if (err)
- return err;
-
- for (i = 0; i < current_poll.used; i++) {
- p = &current_poll.poll[i];
- if (p->fd != fd)
- next_poll.poll[n++] = *p;
- }
- next_poll.used = current_poll.used - 1;
-
- update_thread();
+ struct epoll_event event;
+ int r;

- return 0;
+ CATCH_EINTR(r = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &event));
+ return r < 0 ? -errno : 0;
}
int ignore_sigio_fd(int fd)
@@ -257,125 +98,37 @@ int ignore_sigio_fd(int fd)
return err;
}
-static struct pollfd *setup_initial_poll(int fd)
-{
- struct pollfd *p;
-
- p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
- if (p == NULL) {
- printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
- "poll\n");
- return NULL;
- }
- *p = ((struct pollfd) { .fd = fd,
- .events = POLLIN,
- .revents = 0 });
- return p;
-}
-
static void write_sigio_workaround(void)
{
- struct pollfd *p;
int err;
- int l_write_sigio_fds[2];
- int l_sigio_private[2];
- int l_write_sigio_pid;
- /* We call this *tons* of times - and most ones we must just fail. */
sigio_lock();
- l_write_sigio_pid = write_sigio_pid;
- sigio_unlock();
-
- if (l_write_sigio_pid != -1)
- return;
+ if (write_sigio_td)
+ goto out;

- err = os_pipe(l_write_sigio_fds, 1, 1);
- if (err < 0) {
- printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
- "err = %d\n", -err);
- return;
+ epollfd = epoll_create(MAX_EPOLL_EVENTS);
+ if (epollfd < 0) {
+ printk(UM_KERN_ERR "%s: epoll_create failed, errno = %d\n",
+ __func__, errno);
+ goto out;
}
- err = os_pipe(l_sigio_private, 1, 1);
+
+ err = os_run_helper_thread(&write_sigio_td, write_sigio_thread, NULL);
if (err < 0) {
- printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
- "err = %d\n", -err);
- goto out_close1;
+ printk(UM_KERN_ERR "%s: os_run_helper_thread failed, errno = %d\n",
+ __func__, -err);
+ close(epollfd);
+ epollfd = -1;
+ goto out;
}
- p = setup_initial_poll(l_sigio_private[1]);
- if (!p)
- goto out_close2;
-
- sigio_lock();
-
- /*
- * Did we race? Don't try to optimize this, please, it's not so likely
- * to happen, and no more than once at the boot.
- */
- if (write_sigio_pid != -1)
- goto out_free;
-
- current_poll = ((struct pollfds) { .poll = p,
- .used = 1,
- .size = 1 });
-
- if (write_sigio_irq(l_write_sigio_fds[0]))
- goto out_clear_poll;
-
- memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
- memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));
-
- write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
- CLONE_FILES | CLONE_VM,
- &write_sigio_stack);
-
- if (write_sigio_pid < 0)
- goto out_clear;
-
- sigio_unlock();
- return;
-
-out_clear:
- write_sigio_pid = -1;
- write_sigio_fds[0] = -1;
- write_sigio_fds[1] = -1;
- sigio_private[0] = -1;
- sigio_private[1] = -1;
-out_clear_poll:
- current_poll = ((struct pollfds) { .poll = NULL,
- .size = 0,
- .used = 0 });
-out_free:
+out:
sigio_unlock();
- kfree(p);
-out_close2:
- close(l_sigio_private[0]);
- close(l_sigio_private[1]);
-out_close1:
- close(l_write_sigio_fds[0]);
- close(l_write_sigio_fds[1]);
}
-void sigio_broken(int fd)
+void sigio_broken(void)
{
- int err;
-
write_sigio_workaround();
-
- sigio_lock();
- err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
- if (err) {
- printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd "
- "for descriptor %d\n", fd);
- goto out;
- }
-
- all_sigio_fds.poll[all_sigio_fds.used++] =
- ((struct pollfd) { .fd = fd,
- .events = POLLIN,
- .revents = 0 });
-out:
- sigio_unlock();
}
/* Changed during early boot */
@@ -389,17 +142,16 @@ void maybe_sigio_broken(int fd)
if (pty_output_sigio)
return;
- sigio_broken(fd);
+ sigio_broken();
}
static void sigio_cleanup(void)
{
- if (write_sigio_pid == -1)
+ if (!write_sigio_td)
return;
- os_kill_process(write_sigio_pid, 1);
- free_stack(write_sigio_stack, 0);
- write_sigio_pid = -1;
+ os_kill_helper_thread(write_sigio_td);
+ write_sigio_td = NULL;
}
__uml_exitcall(sigio_cleanup);
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 9ea7269ffb77..e71e5b4878d1 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -21,7 +21,7 @@
#include <sys/ucontext.h>
#include <timetravel.h>
-void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
+void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *, void *mc) = {
[SIGTRAP] = relay_signal,
[SIGFPE] = relay_signal,
[SIGILL] = relay_signal,
@@ -47,7 +47,7 @@ static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
if ((sig != SIGIO) && (sig != SIGWINCH))
unblock_signals_trace();
- (*sig_info[sig])(sig, si, &r);
+ (*sig_info[sig])(sig, si, &r, mc);
errno = save_errno;
}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index e2f8f156402f..ae2aea062f06 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -166,7 +166,7 @@ static void get_skas_faultinfo(int pid, struct faultinfo *fi)
static void handle_segv(int pid, struct uml_pt_regs *regs)
{
get_skas_faultinfo(pid, &regs->faultinfo);
- segv(regs->faultinfo, 0, 1, NULL);
+ segv(regs->faultinfo, 0, 1, NULL, NULL);
}
static void handle_trap(int pid, struct uml_pt_regs *regs)
@@ -525,7 +525,7 @@ void userspace(struct uml_pt_regs *regs)
get_skas_faultinfo(pid,
&regs->faultinfo);
(*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
- regs);
+ regs, NULL);
}
else handle_segv(pid, regs);
break;
@@ -533,7 +533,7 @@ void userspace(struct uml_pt_regs *regs)
handle_trap(pid, regs);
break;
case SIGTRAP:
- relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
+ relay_signal(SIGTRAP, (struct siginfo *)&si, regs, NULL);
break;
case SIGALRM:
break;
@@ -543,7 +543,7 @@ void userspace(struct uml_pt_regs *regs)
case SIGFPE:
case SIGWINCH:
block_signals_trace();
- (*sig_info[sig])(sig, (struct siginfo *)&si, regs);
+ (*sig_info[sig])(sig, (struct siginfo *)&si, regs, NULL);
unblock_signals_trace();
break;
default:
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9395ec37bb64..85ba2e187571 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -27,6 +27,7 @@ config X86_64
# Options that are inherently 64-bit kernel only:
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_PTDUMP
+ select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
@@ -889,6 +890,7 @@ config INTEL_TDX_GUEST
depends on X86_64 && CPU_SUP_INTEL
depends on X86_X2APIC
depends on EFI_STUB
+ depends on PARAVIRT
select ARCH_HAS_CC_PLATFORM
select X86_MEM_ENCRYPT
select X86_MCE
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index a46b1397ad01..c86cbd9cbba3 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -7,12 +7,13 @@ core-y += arch/x86/crypto/
# GCC versions < 11. See:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
#
-ifeq ($(CONFIG_CC_IS_CLANG),y)
-KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
-KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
+ifeq ($(call gcc-min-version, 110000)$(CONFIG_CC_IS_CLANG),y)
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
endif
+KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
+
ifeq ($(CONFIG_X86_32),y)
START := 0x8048000
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 7772b01ab738..edab6d6049be 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -14,6 +14,7 @@
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
+#include <asm/paravirt_types.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/traps.h>
@@ -392,13 +393,21 @@ static int handle_halt(struct ve_info *ve)
{
const bool irq_disabled = irqs_disabled();
+ /*
+ * HLT with IRQs enabled is unsafe, as an IRQ that is intended to be a
+ * wake event may be consumed before requesting HLT emulation, leaving
+ * the vCPU blocking indefinitely.
+ */
+ if (WARN_ONCE(!irq_disabled, "HLT emulation with IRQs enabled"))
+ return -EIO;
+
if (__halt(irq_disabled))
return -EIO;
return ve_instr_len(ve);
}
-void __cpuidle tdx_safe_halt(void)
+void __cpuidle tdx_halt(void)
{
const bool irq_disabled = false;
@@ -409,6 +418,16 @@ void __cpuidle tdx_safe_halt(void)
WARN_ONCE(1, "HLT instruction emulation failed\n");
}
+static void __cpuidle tdx_safe_halt(void)
+{
+ tdx_halt();
+ /*
+ * "__cpuidle" section doesn't support instrumentation, so stick
+ * with raw_* variant that avoids tracing hooks.
+ */
+ raw_local_irq_enable();
+}
+
static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
struct tdx_module_args args = {
@@ -1110,6 +1129,19 @@ void __init tdx_early_init(void)
x86_platform.guest.enc_kexec_finish = tdx_kexec_finish;
/*
+ * Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
+ * will enable interrupts before HLT TDCALL invocation if executed
+ * in STI-shadow, possibly resulting in missed wakeup events.
+ *
+ * Modify all possible HLT execution paths to use TDX specific routines
+ * that directly execute TDCALL and toggle the interrupt state as
* needed after TDCALL completion. Besides making halt handling reliable,
* this also reduces the number of HLT-related #VEs.
+ */
+ pv_ops.irq.safe_halt = tdx_safe_halt;
+ pv_ops.irq.halt = tdx_halt;
+
+ /*
* TDX intercepts the RDMSR to read the X2APIC ID in the parallel
* bringup low level code. That raises #VE which cannot be handled
* there.
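The comment block above explains why a TDX guest must not rely on "sti; hlt": an interrupt delivered in the STI shadow can be consumed before the HLT TDCALL is issued, leaving the vCPU parked. A minimal userspace sketch of the resulting control flow, using hypothetical stubs (tdcall_hlt() and the irqs_on flag are stand-ins, not kernel code): interrupts stay "off" across the hypercall and are enabled only afterwards.

/* sketch.c - illustrative stand-ins only */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_on;

static void tdcall_hlt(bool irq_disabled)
{
	printf("TDCALL(HLT), irq_disabled=%d\n", irq_disabled);
}

/* pv_ops.irq.halt: the caller owns the interrupt state */
static void tdx_halt(void)
{
	tdcall_hlt(true);
}

/* pv_ops.irq.safe_halt: replaces "sti; hlt" - IRQs are enabled only
 * after the hypercall, mirroring raw_local_irq_enable() in the patch */
static void tdx_safe_halt(void)
{
	tdx_halt();
	irqs_on = true;
}

int main(void)
{
	irqs_on = false;
	tdx_safe_halt();
	printf("irqs_on after safe_halt: %d\n", irqs_on);
	return 0;
}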
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 9518bf1ddf35..adb299d3b6a1 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -162,7 +162,8 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
text_start,
image->size,
VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
+ VM_SEALED_SYSMAP,
&vdso_mapping);
if (IS_ERR(vma)) {
@@ -181,7 +182,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
VDSO_VCLOCK_PAGES_START(addr),
VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
- VM_PFNMAP,
+ VM_PFNMAP|VM_SEALED_SYSMAP,
&vvar_vclock_mapping);
if (IS_ERR(vma)) {
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index b5982b94bdba..cbc6157f0b4b 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -16,7 +16,8 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
- asm_inline (ALTERNATIVE("call __sw_hweight32",
+ asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "call __sw_hweight32",
"popcntl %[val], %[cnt]", X86_FEATURE_POPCNT)
: [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
: [val] REG_IN (w));
@@ -45,7 +46,8 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res;
- asm_inline (ALTERNATIVE("call __sw_hweight64",
+ asm_inline (ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "call __sw_hweight64",
"popcntq %[val], %[cnt]", X86_FEATURE_POPCNT)
: [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
: [val] REG_IN (w));
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index af7541c11821..8ace6559d399 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -168,13 +168,6 @@ void iosf_mbi_unblock_punit_i2c_access(void);
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb);
/**
- * iosf_mbi_register_pmic_bus_access_notifier - Unregister PMIC bus notifier
- *
- * @nb: notifier_block to unregister
- */
-int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);
-
-/**
* iosf_mbi_unregister_pmic_bus_access_notifier_unlocked - Unregister PMIC bus
* notifier, unlocked
*
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index abb8374c9ff7..9a9b21b78905 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -76,6 +76,28 @@ static __always_inline void native_local_irq_restore(unsigned long flags)
#endif
+#ifndef CONFIG_PARAVIRT
+#ifndef __ASSEMBLY__
+/*
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+static __always_inline void arch_safe_halt(void)
+{
+ native_safe_halt();
+}
+
+/*
+ * Used when interrupts are already enabled or to
+ * shutdown the processor:
+ */
+static __always_inline void halt(void)
+{
+ native_halt();
+}
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
@@ -98,24 +120,6 @@ static __always_inline void arch_local_irq_enable(void)
}
/*
- * Used in the idle loop; sti takes one instruction cycle
- * to complete:
- */
-static __always_inline void arch_safe_halt(void)
-{
- native_safe_halt();
-}
-
-/*
- * Used when interrupts are already enabled or to
- * shutdown the processor:
- */
-static __always_inline void halt(void)
-{
- native_halt();
-}
-
-/*
* For spinlocks, etc:
*/
static __always_inline unsigned long arch_local_irq_save(void)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index bed346bfac89..c4c23190925c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -102,6 +102,16 @@ static inline void notify_page_enc_status_changed(unsigned long pfn,
PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}
+static __always_inline void arch_safe_halt(void)
+{
+ PVOP_VCALL0(irq.safe_halt);
+}
+
+static inline void halt(void)
+{
+ PVOP_VCALL0(irq.halt);
+}
+
#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
@@ -165,16 +175,6 @@ static inline void __write_cr4(unsigned long x)
PVOP_VCALL1(cpu.write_cr4, x);
}
-static __always_inline void arch_safe_halt(void)
-{
- PVOP_VCALL0(irq.safe_halt);
-}
-
-static inline void halt(void)
-{
- PVOP_VCALL0(irq.halt);
-}
-
static inline u64 paravirt_read_msr(unsigned msr)
{
return PVOP_CALL1(u64, cpu.read_msr, msr);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 62912023b46f..631c306ce1ff 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -120,10 +120,9 @@ struct pv_irq_ops {
struct paravirt_callee_save save_fl;
struct paravirt_callee_save irq_disable;
struct paravirt_callee_save irq_enable;
-
+#endif
void (*safe_halt)(void);
void (*halt)(void);
-#endif
} __no_randomize_layout;
struct pv_mmu_ops {
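The irqflags.h and paravirt hunks above move safe_halt/halt out of the PARAVIRT_XXL-only part of struct pv_irq_ops so that a plain CONFIG_PARAVIRT guest (such as TDX) can override just these two hooks at boot. A small self-contained C sketch of the same indirection pattern, with made-up names standing in for pv_ops and the native/TDX implementations:

#include <stdio.h>

static void native_safe_halt(void) { puts("sti; hlt"); }
static void native_halt(void)      { puts("hlt"); }

/* minimal stand-in for struct pv_irq_ops */
struct irq_ops {
	void (*safe_halt)(void);
	void (*halt)(void);
};

static struct irq_ops pv_irq = {
	.safe_halt = native_safe_halt,
	.halt      = native_halt,
};

/* what arch_safe_halt()/halt() boil down to conceptually */
static void arch_safe_halt(void) { pv_irq.safe_halt(); }
static void arch_halt(void)      { pv_irq.halt(); }

/* a guest override, analogous to tdx_early_init() patching pv_ops */
static void tdx_safe_halt(void) { puts("TDCALL(HLT); enable IRQs"); }
static void tdx_halt(void)      { puts("TDCALL(HLT)"); }

int main(void)
{
	arch_safe_halt();                 /* native path */
	pv_irq.safe_halt = tdx_safe_halt; /* boot-time override */
	pv_irq.halt      = tdx_halt;
	arch_safe_halt();                 /* TDX path */
	arch_halt();
	return 0;
}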
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index daea94c2993c..55a5e656e4b9 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -16,23 +16,23 @@
#ifdef __ASSEMBLER__
#define ASM_CLAC \
- ALTERNATIVE "", "clac", X86_FEATURE_SMAP
+ ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "clac", X86_FEATURE_SMAP
#define ASM_STAC \
- ALTERNATIVE "", "stac", X86_FEATURE_SMAP
+ ALTERNATIVE __stringify(ANNOTATE_IGNORE_ALTERNATIVE), "stac", X86_FEATURE_SMAP
#else /* __ASSEMBLER__ */
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", "clac", X86_FEATURE_SMAP);
+ alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", "stac", X86_FEATURE_SMAP);
+ alternative(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP);
}
static __always_inline unsigned long smap_save(void)
@@ -40,7 +40,8 @@ static __always_inline unsigned long smap_save(void)
unsigned long flags;
asm volatile ("# smap_save\n\t"
- ALTERNATIVE("", "pushf; pop %0; " "clac" "\n\t",
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "", "pushf; pop %0; clac",
X86_FEATURE_SMAP)
: "=rm" (flags) : : "memory", "cc");
@@ -50,16 +51,22 @@ static __always_inline unsigned long smap_save(void)
static __always_inline void smap_restore(unsigned long flags)
{
asm volatile ("# smap_restore\n\t"
- ALTERNATIVE("", "push %0; popf\n\t",
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
+ "", "push %0; popf",
X86_FEATURE_SMAP)
: : "g" (flags) : "memory", "cc");
}
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "", "stac", X86_FEATURE_SMAP)
+
+#define ASM_CLAC_UNSAFE \
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "clac", X86_FEATURE_SMAP)
+#define ASM_STAC_UNSAFE \
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "stac", X86_FEATURE_SMAP)
#endif /* __ASSEMBLER__ */
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index 65394aa9b49f..4a1922ec80cf 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -58,7 +58,7 @@ void tdx_get_ve_info(struct ve_info *ve);
bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
-void tdx_safe_halt(void);
+void tdx_halt(void);
bool tdx_early_handle_ve(struct pt_regs *regs);
@@ -72,7 +72,7 @@ void __init tdx_dump_td_ctls(u64 td_ctls);
#else
static inline void tdx_early_init(void) { };
-static inline void tdx_safe_halt(void) { };
+static inline void tdx_halt(void) { };
static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 97771b9d33af..59a62c3780a2 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -231,14 +231,12 @@ static __always_inline void __xen_stac(void)
* Suppress objtool seeing the STAC/CLAC and getting confused about it
* calling random code with AC=1.
*/
- asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
- ASM_STAC ::: "memory", "flags");
+ asm volatile(ASM_STAC_UNSAFE ::: "memory", "flags");
}
static __always_inline void __xen_clac(void)
{
- asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
- ASM_CLAC ::: "memory", "flags");
+ asm volatile(ASM_CLAC_UNSAFE ::: "memory", "flags");
}
static inline long
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 97925632c28e..1ccd05d8999f 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -75,6 +75,11 @@ void paravirt_set_sched_clock(u64 (*func)(void))
static_call_update(pv_sched_clock, func);
}
+static noinstr void pv_native_safe_halt(void)
+{
+ native_safe_halt();
+}
+
#ifdef CONFIG_PARAVIRT_XXL
static noinstr void pv_native_write_cr2(unsigned long val)
{
@@ -100,11 +105,6 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
native_set_debugreg(regno, val);
}
-
-static noinstr void pv_native_safe_halt(void)
-{
- native_safe_halt();
-}
#endif
struct pv_info pv_info = {
@@ -161,9 +161,11 @@ struct paravirt_patch_template pv_ops = {
.irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl),
.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
+#endif /* CONFIG_PARAVIRT_XXL */
+
+ /* Irq HLT ops. */
.irq.safe_halt = pv_native_safe_halt,
.irq.halt = native_halt,
-#endif /* CONFIG_PARAVIRT_XXL */
/* Mmu ops. */
.mmu.flush_tlb_user = native_flush_tlb_local,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 91f6ff618852..962c3ce39323 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -939,7 +939,7 @@ void __init select_idle_routine(void)
static_call_update(x86_idle, mwait_idle);
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
pr_info("using TDX aware idle routine\n");
- static_call_update(x86_idle, tdx_safe_halt);
+ static_call_update(x86_idle, tdx_halt);
} else {
static_call_update(x86_idle, default_idle);
}
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index aa8c341b2441..06296eb69fd4 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -77,6 +77,24 @@ SYM_FUNC_START(rep_movs_alternative)
_ASM_EXTABLE_UA( 0b, 1b)
.Llarge_movsq:
+ /* Do the first possibly unaligned word */
+0: movq (%rsi),%rax
+1: movq %rax,(%rdi)
+
+ _ASM_EXTABLE_UA( 0b, .Lcopy_user_tail)
+ _ASM_EXTABLE_UA( 1b, .Lcopy_user_tail)
+
+ /* What would be the offset to the aligned destination? */
+ leaq 8(%rdi),%rax
+ andq $-8,%rax
+ subq %rdi,%rax
+
+ /* .. and update pointers and count to match */
+ addq %rax,%rdi
+ addq %rax,%rsi
+ subq %rax,%rcx
+
+ /* make %rcx contain the number of words, %rax the remainder */
movq %rcx,%rax
shrq $3,%rcx
andl $7,%eax
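The added prologue copies one possibly unaligned word first and then rounds the destination pointer up to the next 8-byte boundary before the word-at-a-time loop. A hedged userspace C sketch of the same pointer arithmetic (not the actual uaccess code, and without the exception-table handling):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_words_aligned_dst(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if (len >= 8) {
		uint64_t w;

		memcpy(&w, s, 8);          /* first, possibly unaligned, word */
		memcpy(d, &w, 8);

		/* leaq 8(%rdi),%rax; andq $-8,%rax; subq %rdi,%rax */
		size_t adv = (((uintptr_t)d + 8) & ~(uintptr_t)7) - (uintptr_t)d;
		d += adv;
		s += adv;
		len -= adv;

		assert(((uintptr_t)d & 7) == 0);
		for (; len >= 8; d += 8, s += 8, len -= 8) {
			memcpy(&w, s, 8);
			memcpy(d, &w, 8);
		}
	}
	memcpy(d, s, len);                 /* byte tail */
}

int main(void)
{
	char src[64], dst[64];

	for (int i = 0; i < 64; i++)
		src[i] = (char)i;
	copy_words_aligned_dst(dst + 3, src + 3, 40);
	printf("mismatch: %d\n", memcmp(dst + 3, src + 3, 40)); /* prints 0 */
	return 0;
}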
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 36a017b4a30d..7c4f6f591f2b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -959,9 +959,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
ret = __add_pages(nid, start_pfn, nr_pages, params);
WARN_ON_ONCE(ret);
- /* update max_pfn, max_low_pfn and high_memory */
- update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
- nr_pages << PAGE_SHIFT);
+ /*
+ * Special case: add_pages() is called by memremap_pages() for adding device
+ * private pages. Do not bump up max_pfn in the device private path,
+ * because max_pfn changes affect dma_addressing_limited().
+ *
+ * dma_addressing_limited() returning true when max_pfn is beyond the
+ * device's addressable memory can force device drivers to use bounce
+ * buffers and hurt their performance:
+ */
+ if (!params->pgmap)
+ /* update max_pfn, max_low_pfn and high_memory */
+ update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
return ret;
}
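The comment above argues that bumping max_pfn for device-private (pgmap) ranges can flip dma_addressing_limited() for devices that can reach all real RAM, forcing bounce buffers. A tiny sketch of that arithmetic with made-up numbers; the real helper is more involved (DMA masks, bus_dma_limit, encryption), so treat this only as an illustration of the comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* simplified stand-in for the dma_addressing_limited() idea */
static bool dma_limited(uint64_t dma_mask, uint64_t max_pfn)
{
	return ((max_pfn << PAGE_SHIFT) - 1) > dma_mask;
}

int main(void)
{
	uint64_t mask39   = (1ULL << 39) - 1;            /* 39-bit DMA device */
	uint64_t ram_pfn  = (64ULL << 30) >> PAGE_SHIFT; /* 64 GiB of RAM */
	uint64_t priv_pfn = (1ULL << 46) >> PAGE_SHIFT;  /* device-private range mapped high */

	printf("RAM only:         limited=%d\n", dma_limited(mask39, ram_pfn));
	printf("after pgmap bump: limited=%d\n", dma_limited(mask39, priv_pfn));
	return 0;
}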
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 72405d315b41..def3d9284254 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2274,6 +2274,7 @@ int set_mce_nospec(unsigned long pfn)
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
return rc;
}
+EXPORT_SYMBOL_GPL(set_mce_nospec);
/* Restore full speculative operation to the pfn. */
int clear_mce_nospec(unsigned long pfn)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index cec321fb74f2..a05fcddfc811 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -20,7 +20,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
paravirt_release_pte(page_to_pfn(pte));
- tlb_remove_table(tlb, page_ptdesc(pte));
+ tlb_remove_ptdesc(tlb, page_ptdesc(pte));
}
#if CONFIG_PGTABLE_LEVELS > 2
@@ -34,21 +34,21 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
#ifdef CONFIG_X86_PAE
tlb->need_flush_all = 1;
#endif
- tlb_remove_table(tlb, virt_to_ptdesc(pmd));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
}
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
- tlb_remove_table(tlb, virt_to_ptdesc(pud));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
}
#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
- tlb_remove_table(tlb, virt_to_ptdesc(p4d));
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
index c81cea208c2c..40ae94db20d8 100644
--- a/arch/x86/platform/intel/iosf_mbi.c
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -422,19 +422,6 @@ int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
}
EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier_unlocked);
-int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
-{
- int ret;
-
- /* Wait for the bus to go inactive before unregistering */
- iosf_mbi_punit_acquire();
- ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb);
- iosf_mbi_punit_release();
-
- return ret;
-}
-EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);
-
void iosf_mbi_assert_punit_acquired(void)
{
WARN_ON(iosf_mbi_pmic_punit_access_count == 0);
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 63230ff8cf4f..08e76a5ca155 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -27,6 +27,7 @@
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>
+#include <asm/fred.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -231,6 +232,19 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
*/
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+
+ /*
+ * Reinitialize FRED to ensure the FRED MSRs contain the same values
+ * as before hibernation.
+ *
+ * Note, the setup of FRED RSPs requires access to percpu data
+ * structures. Therefore, FRED reinitialization can only occur after
+ * the percpu access pointer (i.e., MSR_GS_BASE) is restored.
+ */
+ if (ctxt->cr4 & X86_CR4_FRED) {
+ cpu_init_fred_exceptions();
+ cpu_init_fred_rsps();
+ }
#else
loadsegment(fs, __KERNEL_PERCPU);
#endif
diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
index 6c2986d2ad11..08cd913cbd4e 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -12,8 +12,6 @@
#include <stdarg.h>
#include <linux/kallsyms.h>
-#define unlikely(cond) (cond)
-
#include <asm/insn.h>
#include <inat.c>
#include <insn.c>
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 4da336965698..b51aefd6ec2b 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -12,9 +12,9 @@
*/
#ifdef CONFIG_X86_32
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#define mb() alternative("lock addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else /* CONFIG_X86_32 */
diff --git a/arch/x86/um/asm/module.h b/arch/x86/um/asm/module.h
deleted file mode 100644
index a3b061d66082..000000000000
--- a/arch/x86/um/asm/module.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __UM_MODULE_H
-#define __UM_MODULE_H
-
-/* UML is simple */
-struct mod_arch_specific
-{
-};
-
-#ifdef CONFIG_X86_32
-
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
-
-#else
-
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Ehdr Elf64_Ehdr
-
-#endif
-
-#endif
diff --git a/arch/x86/um/os-Linux/mcontext.c b/arch/x86/um/os-Linux/mcontext.c
index e80ab7d28117..37decaa74761 100644
--- a/arch/x86/um/os-Linux/mcontext.c
+++ b/arch/x86/um/os-Linux/mcontext.c
@@ -4,6 +4,7 @@
#include <asm/ptrace.h>
#include <sysdep/ptrace.h>
#include <sysdep/mcontext.h>
+#include <arch.h>
void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
{
@@ -27,7 +28,17 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
COPY(RIP);
COPY2(EFLAGS, EFL);
COPY2(CS, CSGSFS);
- regs->gp[CS / sizeof(unsigned long)] &= 0xffff;
- regs->gp[CS / sizeof(unsigned long)] |= 3;
+ regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48;
+#endif
+}
+
+void mc_set_rip(void *_mc, void *target)
+{
+ mcontext_t *mc = _mc;
+
+#ifdef __i386__
+ mc->gregs[REG_EIP] = (unsigned long)target;
+#else
+ mc->gregs[REG_RIP] = (unsigned long)target;
#endif
}
diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h b/arch/x86/um/shared/sysdep/faultinfo_32.h
index b6f2437ec29c..ab5c8e47049c 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_32.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_32.h
@@ -29,4 +29,16 @@ struct faultinfo {
#define PTRACE_FULL_FAULTINFO 0
+#define ___backtrack_faulted(_faulted) \
+ asm volatile ( \
+ "mov $0, %0\n" \
+ "movl $__get_kernel_nofault_faulted_%=,%1\n" \
+ "jmp _end_%=\n" \
+ "__get_kernel_nofault_faulted_%=:\n" \
+ "mov $1, %0;" \
+ "_end_%=:" \
+ : "=r" (_faulted), \
+ "=m" (current->thread.segv_continue) :: \
+ )
+
#endif
diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h b/arch/x86/um/shared/sysdep/faultinfo_64.h
index ee88f88974ea..26fb4835d3e9 100644
--- a/arch/x86/um/shared/sysdep/faultinfo_64.h
+++ b/arch/x86/um/shared/sysdep/faultinfo_64.h
@@ -29,4 +29,16 @@ struct faultinfo {
#define PTRACE_FULL_FAULTINFO 1
+#define ___backtrack_faulted(_faulted) \
+ asm volatile ( \
+ "mov $0, %0\n" \
+ "movq $__get_kernel_nofault_faulted_%=,%1\n" \
+ "jmp _end_%=\n" \
+ "__get_kernel_nofault_faulted_%=:\n" \
+ "mov $1, %0;" \
+ "_end_%=:" \
+ : "=r" (_faulted), \
+ "=m" (current->thread.segv_continue) :: \
+ )
+
#endif
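Both ___backtrack_faulted() variants record the address of a local label in current->thread.segv_continue; when the probed access faults, the UML SEGV path rewrites the saved instruction pointer (mc_set_rip() above) to that label, whose code sets _faulted. A userspace analogue of the same "record a resume point, jump to it from the fault handler" pattern, with sigsetjmp playing the role of the asm label (illustrative only; the kernel does not use setjmp):

#define _POSIX_C_SOURCE 200809L
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static sigjmp_buf segv_continue;     /* role of thread.segv_continue */

static void segv_handler(int sig)
{
	(void)sig;
	siglongjmp(segv_continue, 1);    /* analogous to mc_set_rip(mc, label) */
}

static int probe_read(const int *addr, int *val)
{
	if (sigsetjmp(segv_continue, 1))
		return -1;               /* the "_faulted = 1" label */
	*val = *addr;                    /* access that may fault */
	return 0;                        /* the "_faulted = 0" fall-through */
}

int main(void)
{
	struct sigaction sa;
	int v = 42;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = segv_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	printf("valid read:   %d\n", probe_read(&v, &v));
	printf("invalid read: %d\n", probe_read((const int *)8, &v));
	return 0;
}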
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index f238f7b33cdd..dc8dfb2abd80 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -12,33 +12,22 @@
static unsigned int __read_mostly vdso_enabled = 1;
unsigned long um_vdso_addr;
+static struct page *um_vdso;
extern unsigned long task_size;
extern char vdso_start[], vdso_end[];
-static struct page **vdsop;
-
static int __init init_vdso(void)
{
- struct page *um_vdso;
-
BUG_ON(vdso_end - vdso_start > PAGE_SIZE);
um_vdso_addr = task_size - PAGE_SIZE;
- vdsop = kmalloc(sizeof(struct page *), GFP_KERNEL);
- if (!vdsop)
- goto oom;
-
um_vdso = alloc_page(GFP_KERNEL);
- if (!um_vdso) {
- kfree(vdsop);
-
+ if (!um_vdso)
goto oom;
- }
copy_page(page_address(um_vdso), vdso_start);
- *vdsop = um_vdso;
return 0;
@@ -56,6 +45,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct mm_struct *mm = current->mm;
static struct vm_special_mapping vdso_mapping = {
.name = "[vdso]",
+ .pages = &um_vdso,
};
if (!vdso_enabled)
@@ -64,7 +54,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (mmap_write_lock_killable(mm))
return -EINTR;
- vdso_mapping.pages = vdsop;
vma = _install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ae8494d88897..c2697db59109 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2965,8 +2965,7 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
static struct request *blk_mq_get_new_requests(struct request_queue *q,
struct blk_plug *plug,
- struct bio *bio,
- unsigned int nsegs)
+ struct bio *bio)
{
struct blk_mq_alloc_data data = {
.q = q,
@@ -3125,7 +3124,7 @@ new_request:
if (rq) {
blk_mq_use_cached_rq(rq, plug, bio);
} else {
- rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+ rq = blk_mq_get_new_requests(q, plug, bio);
if (unlikely(!rq)) {
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
@@ -4465,14 +4464,12 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
return NULL;
}
-static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
- struct request_queue *q)
+static void __blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+ struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
unsigned long i, j;
- /* protect against switching io scheduler */
- mutex_lock(&q->elevator_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int old_node;
int node = blk_mq_get_hctx_node(set, i);
@@ -4505,7 +4502,19 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
xa_for_each_start(&q->hctx_table, j, hctx, j)
blk_mq_exit_hctx(q, set, hctx, j);
- mutex_unlock(&q->elevator_lock);
+}
+
+static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+ struct request_queue *q, bool lock)
+{
+ if (lock) {
+ /* protect against switching io scheduler */
+ mutex_lock(&q->elevator_lock);
+ __blk_mq_realloc_hw_ctxs(set, q);
+ mutex_unlock(&q->elevator_lock);
+ } else {
+ __blk_mq_realloc_hw_ctxs(set, q);
+ }
/* unregister cpuhp callbacks for exited hctxs */
blk_mq_remove_hw_queues_cpuhp(q);
@@ -4537,7 +4546,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
xa_init(&q->hctx_table);
- blk_mq_realloc_hw_ctxs(set, q);
+ blk_mq_realloc_hw_ctxs(set, q, false);
if (!q->nr_hw_queues)
goto err_hctxs;
@@ -5033,7 +5042,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
fallback:
blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
- blk_mq_realloc_hw_ctxs(set, q);
+ blk_mq_realloc_hw_ctxs(set, q, true);
if (q->nr_hw_queues != set->nr_hw_queues) {
int i = prev_nr_hw_queues;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d294c5948b67..abd609d4c8ef 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -302,13 +302,6 @@ struct test_sg_division {
* @key_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
* the @key_offset
* @finalization_type: what finalization function to use for hashes
- * @multibuffer: test with multibuffer
- * @multibuffer_index: random number used to generate the message index to use
- * for multibuffer.
- * @multibuffer_uneven: test with multibuffer using uneven lengths
- * @multibuffer_lens: random lengths to make chained request uneven
- * @multibuffer_count: random number used to generate the num_msgs parameter
- * for multibuffer
* @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
* This applies to the parts of the operation that aren't controlled
* individually by @nosimd_setkey or @src_divs[].nosimd.
@@ -328,11 +321,6 @@ struct testvec_config {
enum finalization_type finalization_type;
bool nosimd;
bool nosimd_setkey;
- bool multibuffer;
- unsigned int multibuffer_index;
- unsigned int multibuffer_count;
- bool multibuffer_uneven;
- unsigned int multibuffer_lens[MAX_MB_MSGS];
};
#define TESTVEC_CONFIG_NAMELEN 192
@@ -572,7 +560,6 @@ struct test_sglist {
char *bufs[XBUFSIZE];
struct scatterlist sgl[XBUFSIZE];
struct scatterlist sgl_saved[XBUFSIZE];
- struct scatterlist full_sgl[XBUFSIZE];
struct scatterlist *sgl_ptr;
unsigned int nents;
};
@@ -686,11 +673,6 @@ static int build_test_sglist(struct test_sglist *tsgl,
sg_mark_end(&tsgl->sgl[tsgl->nents - 1]);
tsgl->sgl_ptr = tsgl->sgl;
memcpy(tsgl->sgl_saved, tsgl->sgl, tsgl->nents * sizeof(tsgl->sgl[0]));
-
- sg_init_table(tsgl->full_sgl, XBUFSIZE);
- for (i = 0; i < XBUFSIZE; i++)
- sg_set_buf(tsgl->full_sgl, tsgl->bufs[i], PAGE_SIZE * 2);
-
return 0;
}
@@ -1167,27 +1149,6 @@ static void generate_random_testvec_config(struct rnd_state *rng,
break;
}
- if (prandom_bool(rng)) {
- int i;
-
- cfg->multibuffer = true;
- cfg->multibuffer_count = prandom_u32_state(rng);
- cfg->multibuffer_count %= MAX_MB_MSGS;
- if (cfg->multibuffer_count++) {
- cfg->multibuffer_index = prandom_u32_state(rng);
- cfg->multibuffer_index %= cfg->multibuffer_count;
- }
-
- cfg->multibuffer_uneven = prandom_bool(rng);
- for (i = 0; i < MAX_MB_MSGS; i++)
- cfg->multibuffer_lens[i] =
- generate_random_length(rng, PAGE_SIZE * 2 * XBUFSIZE);
-
- p += scnprintf(p, end - p, " multibuffer(%d/%d%s)",
- cfg->multibuffer_index, cfg->multibuffer_count,
- cfg->multibuffer_uneven ? "/uneven" : "");
- }
-
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP)) {
if (prandom_bool(rng)) {
cfg->nosimd = true;
@@ -1492,7 +1453,6 @@ static int do_ahash_op(int (*op)(struct ahash_request *req),
struct ahash_request *req,
struct crypto_wait *wait, bool nosimd)
{
- struct ahash_request *r2;
int err;
if (nosimd)
@@ -1503,15 +1463,7 @@ static int do_ahash_op(int (*op)(struct ahash_request *req),
if (nosimd)
crypto_reenable_simd_for_test();
- err = crypto_wait_req(err, wait);
- if (err)
- return err;
-
- list_for_each_entry(r2, &req->base.list, base.list)
- if (r2->base.err)
- return r2->base.err;
-
- return 0;
+ return crypto_wait_req(err, wait);
}
static int check_nonfinal_ahash_op(const char *op, int err,
@@ -1532,65 +1484,20 @@ static int check_nonfinal_ahash_op(const char *op, int err,
return 0;
}
-static void setup_ahash_multibuffer(
- struct ahash_request *reqs[MAX_MB_MSGS],
- const struct testvec_config *cfg,
- struct test_sglist *tsgl)
-{
- struct scatterlist *sg = tsgl->full_sgl;
- static u8 trash[HASH_MAX_DIGESTSIZE];
- struct ahash_request *req = reqs[0];
- unsigned int num_msgs;
- unsigned int msg_idx;
- int i;
-
- if (!cfg->multibuffer)
- return;
-
- num_msgs = cfg->multibuffer_count;
- if (num_msgs == 1)
- return;
-
- msg_idx = cfg->multibuffer_index;
- for (i = 1; i < num_msgs; i++) {
- struct ahash_request *r2 = reqs[i];
- unsigned int nbytes = req->nbytes;
-
- if (cfg->multibuffer_uneven)
- nbytes = cfg->multibuffer_lens[i];
-
- ahash_request_set_callback(r2, req->base.flags, NULL, NULL);
- ahash_request_set_crypt(r2, sg, trash, nbytes);
- ahash_request_chain(r2, req);
- }
-
- if (msg_idx) {
- reqs[msg_idx]->src = req->src;
- reqs[msg_idx]->nbytes = req->nbytes;
- reqs[msg_idx]->result = req->result;
- req->src = sg;
- if (cfg->multibuffer_uneven)
- req->nbytes = cfg->multibuffer_lens[0];
- req->result = trash;
- }
-}
-
/* Test one hash test vector in one configuration, using the ahash API */
static int test_ahash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
- struct ahash_request *reqs[MAX_MB_MSGS],
+ struct ahash_request *req,
struct test_sglist *tsgl,
u8 *hashstate)
{
- struct ahash_request *req = reqs[0];
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
const unsigned int digestsize = crypto_ahash_digestsize(tfm);
const unsigned int statesize = crypto_ahash_statesize(tfm);
const char *driver = crypto_ahash_driver_name(tfm);
const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
const struct test_sg_division *divs[XBUFSIZE];
- struct ahash_request *reqi = req;
DECLARE_CRYPTO_WAIT(wait);
unsigned int i;
struct scatterlist *pending_sgl;
@@ -1598,9 +1505,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
int err;
- if (cfg->multibuffer)
- reqi = reqs[cfg->multibuffer_index];
-
/* Set the key, if specified */
if (vec->ksize) {
err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize,
@@ -1630,7 +1534,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
/* Do the actual hashing */
- testmgr_poison(reqi->__ctx, crypto_ahash_reqsize(tfm));
+ testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
@@ -1639,7 +1543,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
ahash_request_set_callback(req, req_flags, crypto_req_done,
&wait);
ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
- setup_ahash_multibuffer(reqs, cfg, tsgl);
err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd);
if (err) {
if (err == vec->digest_error)
@@ -1661,7 +1564,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
ahash_request_set_crypt(req, NULL, result, 0);
- setup_ahash_multibuffer(reqs, cfg, tsgl);
err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
err = check_nonfinal_ahash_op("init", err, result, digestsize,
driver, vec_name, cfg);
@@ -1678,7 +1580,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
crypto_req_done, &wait);
ahash_request_set_crypt(req, pending_sgl, result,
pending_len);
- setup_ahash_multibuffer(reqs, cfg, tsgl);
err = do_ahash_op(crypto_ahash_update, req, &wait,
divs[i]->nosimd);
err = check_nonfinal_ahash_op("update", err,
@@ -1693,7 +1594,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
/* Test ->export() and ->import() */
testmgr_poison(hashstate + statesize,
TESTMGR_POISON_LEN);
- err = crypto_ahash_export(reqi, hashstate);
+ err = crypto_ahash_export(req, hashstate);
err = check_nonfinal_ahash_op("export", err,
result, digestsize,
driver, vec_name, cfg);
@@ -1706,8 +1607,8 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
return -EOVERFLOW;
}
- testmgr_poison(reqi->__ctx, crypto_ahash_reqsize(tfm));
- err = crypto_ahash_import(reqi, hashstate);
+ testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
+ err = crypto_ahash_import(req, hashstate);
err = check_nonfinal_ahash_op("import", err,
result, digestsize,
driver, vec_name, cfg);
@@ -1721,7 +1622,6 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec,
ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
ahash_request_set_crypt(req, pending_sgl, result, pending_len);
- setup_ahash_multibuffer(reqs, cfg, tsgl);
if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
/* finish with update() and final() */
err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
@@ -1753,7 +1653,7 @@ result_ready:
static int test_hash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
- struct ahash_request *reqs[MAX_MB_MSGS],
+ struct ahash_request *req,
struct shash_desc *desc,
struct test_sglist *tsgl,
u8 *hashstate)
@@ -1773,12 +1673,11 @@ static int test_hash_vec_cfg(const struct hash_testvec *vec,
return err;
}
- return test_ahash_vec_cfg(vec, vec_name, cfg, reqs, tsgl, hashstate);
+ return test_ahash_vec_cfg(vec, vec_name, cfg, req, tsgl, hashstate);
}
static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
- struct ahash_request *reqs[MAX_MB_MSGS],
- struct shash_desc *desc,
+ struct ahash_request *req, struct shash_desc *desc,
struct test_sglist *tsgl, u8 *hashstate)
{
char vec_name[16];
@@ -1790,7 +1689,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
err = test_hash_vec_cfg(vec, vec_name,
&default_hash_testvec_configs[i],
- reqs, desc, tsgl, hashstate);
+ req, desc, tsgl, hashstate);
if (err)
return err;
}
@@ -1807,7 +1706,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_hash_vec_cfg(vec, vec_name, &cfg,
- reqs, desc, tsgl, hashstate);
+ req, desc, tsgl, hashstate);
if (err)
return err;
cond_resched();
@@ -1866,12 +1765,11 @@ done:
*/
static int test_hash_vs_generic_impl(const char *generic_driver,
unsigned int maxkeysize,
- struct ahash_request *reqs[MAX_MB_MSGS],
+ struct ahash_request *req,
struct shash_desc *desc,
struct test_sglist *tsgl,
u8 *hashstate)
{
- struct ahash_request *req = reqs[0];
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
const unsigned int digestsize = crypto_ahash_digestsize(tfm);
const unsigned int blocksize = crypto_ahash_blocksize(tfm);
@@ -1969,7 +1867,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
sizeof(cfgname));
err = test_hash_vec_cfg(&vec, vec_name, cfg,
- reqs, desc, tsgl, hashstate);
+ req, desc, tsgl, hashstate);
if (err)
goto out;
cond_resched();
@@ -1987,7 +1885,7 @@ out:
#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_hash_vs_generic_impl(const char *generic_driver,
unsigned int maxkeysize,
- struct ahash_request *reqs[MAX_MB_MSGS],
+ struct ahash_request *req,
struct shash_desc *desc,
struct test_sglist *tsgl,
u8 *hashstate)
@@ -2034,8 +1932,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
u32 type, u32 mask,
const char *generic_driver, unsigned int maxkeysize)
{
- struct ahash_request *reqs[MAX_MB_MSGS] = {};
struct crypto_ahash *atfm = NULL;
+ struct ahash_request *req = NULL;
struct crypto_shash *stfm = NULL;
struct shash_desc *desc = NULL;
struct test_sglist *tsgl = NULL;
@@ -2059,14 +1957,12 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
}
driver = crypto_ahash_driver_name(atfm);
- for (i = 0; i < MAX_MB_MSGS; i++) {
- reqs[i] = ahash_request_alloc(atfm, GFP_KERNEL);
- if (!reqs[i]) {
- pr_err("alg: hash: failed to allocate request for %s\n",
- driver);
- err = -ENOMEM;
- goto out;
- }
+ req = ahash_request_alloc(atfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("alg: hash: failed to allocate request for %s\n",
+ driver);
+ err = -ENOMEM;
+ goto out;
}
/*
@@ -2102,12 +1998,12 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
if (fips_enabled && vecs[i].fips_skip)
continue;
- err = test_hash_vec(&vecs[i], i, reqs, desc, tsgl, hashstate);
+ err = test_hash_vec(&vecs[i], i, req, desc, tsgl, hashstate);
if (err)
goto out;
cond_resched();
}
- err = test_hash_vs_generic_impl(generic_driver, maxkeysize, reqs,
+ err = test_hash_vs_generic_impl(generic_driver, maxkeysize, req,
desc, tsgl, hashstate);
out:
kfree(hashstate);
@@ -2117,12 +2013,7 @@ out:
}
kfree(desc);
crypto_free_shash(stfm);
- if (reqs[0]) {
- ahash_request_set_callback(reqs[0], 0, NULL, NULL);
- for (i = 1; i < MAX_MB_MSGS && reqs[i]; i++)
- ahash_request_chain(reqs[i], reqs[0]);
- ahash_request_free(reqs[0]);
- }
+ ahash_request_free(req);
crypto_free_ahash(atfm);
return err;
}
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 435ec60a9682..4ad88187dc7a 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -353,8 +353,10 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc
* device represented by it.
*/
static const struct acpi_device_id acpi_nonpnp_device_ids[] = {
+ {"INT3F0D"},
{"INTC1080"},
{"INTC1081"},
+ {"INTC1099"},
{""},
};
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index efdadc74e3f4..103f29661576 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -649,6 +649,13 @@ acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length
obj = buffer.pointer;
+ /*
+ * Some buggy implementations incorrectly return the EDID buffer in an ACPI package.
+ * In this case, extract the buffer from the package.
+ */
+ if (obj && obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 1)
+ obj = &obj->package.elements[0];
+
if (obj && obj->type == ACPI_TYPE_BUFFER) {
*edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
ret = *edid ? obj->buffer.length : -ENOMEM;
@@ -658,7 +665,7 @@ acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length
ret = -EFAULT;
}
- kfree(obj);
+ kfree(buffer.pointer);
return ret;
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index b72772494655..289e365f84b2 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -674,6 +674,105 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
+/* Room for 8 entries */
+#define CXL_CPER_PROT_ERR_FIFO_DEPTH 8
+static DEFINE_KFIFO(cxl_cper_prot_err_fifo, struct cxl_cper_prot_err_work_data,
+ CXL_CPER_PROT_ERR_FIFO_DEPTH);
+
+/* Synchronize schedule_work() with cxl_cper_prot_err_work changes */
+static DEFINE_SPINLOCK(cxl_cper_prot_err_work_lock);
+struct work_struct *cxl_cper_prot_err_work;
+
+static void cxl_cper_post_prot_err(struct cxl_cper_sec_prot_err *prot_err,
+ int severity)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct cxl_cper_prot_err_work_data wd;
+ u8 *dvsec_start, *cap_start;
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_AGENT_ADDRESS)) {
+ pr_err_ratelimited("CXL CPER invalid agent type\n");
+ return;
+ }
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_ERROR_LOG)) {
+ pr_err_ratelimited("CXL CPER invalid protocol error log\n");
+ return;
+ }
+
+ if (prot_err->err_len != sizeof(struct cxl_ras_capability_regs)) {
+ pr_err_ratelimited("CXL CPER invalid RAS Cap size (%u)\n",
+ prot_err->err_len);
+ return;
+ }
+
+ if (!(prot_err->valid_bits & PROT_ERR_VALID_SERIAL_NUMBER))
+ pr_warn(FW_WARN "CXL CPER no device serial number\n");
+
+ guard(spinlock_irqsave)(&cxl_cper_prot_err_work_lock);
+
+ if (!cxl_cper_prot_err_work)
+ return;
+
+ switch (prot_err->agent_type) {
+ case RCD:
+ case DEVICE:
+ case LD:
+ case FMLD:
+ case RP:
+ case DSP:
+ case USP:
+ memcpy(&wd.prot_err, prot_err, sizeof(wd.prot_err));
+
+ dvsec_start = (u8 *)(prot_err + 1);
+ cap_start = dvsec_start + prot_err->dvsec_len;
+
+ memcpy(&wd.ras_cap, cap_start, sizeof(wd.ras_cap));
+ wd.severity = cper_severity_to_aer(severity);
+ break;
+ default:
+ pr_err_ratelimited("CXL CPER invalid agent type: %d\n",
+ prot_err->agent_type);
+ return;
+ }
+
+ if (!kfifo_put(&cxl_cper_prot_err_fifo, wd)) {
+ pr_err_ratelimited("CXL CPER kfifo overflow\n");
+ return;
+ }
+
+ schedule_work(cxl_cper_prot_err_work);
+#endif
+}
+
+int cxl_cper_register_prot_err_work(struct work_struct *work)
+{
+ if (cxl_cper_prot_err_work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_prot_err_work_lock);
+ cxl_cper_prot_err_work = work;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_register_prot_err_work, "CXL");
+
+int cxl_cper_unregister_prot_err_work(struct work_struct *work)
+{
+ if (cxl_cper_prot_err_work != work)
+ return -EINVAL;
+
+ guard(spinlock)(&cxl_cper_prot_err_work_lock);
+ cxl_cper_prot_err_work = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_prot_err_work, "CXL");
+
+int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
+{
+ return kfifo_get(&cxl_cper_prot_err_fifo, wd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cper_prot_err_kfifo_get, "CXL");
+
/* Room for 8 entries for each of the 4 event log queues */
#define CXL_CPER_FIFO_DEPTH 32
DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH);
@@ -777,6 +876,10 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
+ } else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
+ struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+
+ cxl_cper_post_prot_err(prot_err, gdata->error_severity);
} else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata);
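The registration pair added above lets exactly one consumer attach a work item that drains the protocol-error kfifo after the GHES notifier schedules it. A hedged kernel-style sketch of how a consumer might pair with these hooks; this is not the actual drivers/cxl code, only a fragment that assumes the helpers introduced in this hunk and the usual workqueue headers:

static void prot_err_work_fn(struct work_struct *work)
{
	struct cxl_cper_prot_err_work_data wd;

	/* drain everything queued before this work was scheduled */
	while (cxl_cper_prot_err_kfifo_get(&wd))
		pr_info("CXL protocol error, AER severity %d\n", wd.severity);
}
static DECLARE_WORK(prot_err_work, prot_err_work_fn);

static int prot_err_consumer_init(void)
{
	return cxl_cper_register_prot_err_work(&prot_err_work);
}

static void prot_err_consumer_exit(void)
{
	cxl_cper_unregister_prot_err_work(&prot_err_work);
	cancel_work_sync(&prot_err_work);
}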
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index a5d47819b3a4..ae035b93da08 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -485,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_mask = nd_desc->cmd_mask;
if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
family = call_pkg->nd_family;
- if (family > NVDIMM_BUS_FAMILY_MAX ||
+ if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX ||
!test_bit(family, &nd_desc->bus_family_mask))
return -EINVAL;
family = array_index_nospec(family,
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index bfbb08b1e6af..9d9052258e92 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -108,6 +108,45 @@ static struct memory_target *find_mem_target(unsigned int mem_pxm)
return NULL;
}
+/**
+ * hmat_get_extended_linear_cache_size - Retrieve the extended linear cache size
+ * @backing_res: resource from the backing media
+ * @nid: node id for the memory region
+ * @cache_size: (Output) size of extended linear cache.
+ *
+ * Return: 0 on success. Errno on failure.
+ *
+ */
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+ resource_size_t *cache_size)
+{
+ unsigned int pxm = node_to_pxm(nid);
+ struct memory_target *target;
+ struct target_cache *tcache;
+ struct resource *res;
+
+ target = find_mem_target(pxm);
+ if (!target)
+ return -ENOENT;
+
+ list_for_each_entry(tcache, &target->caches, node) {
+ if (tcache->cache_attrs.address_mode !=
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR)
+ continue;
+
+ res = &target->memregions;
+ if (!resource_contains(res, backing_res))
+ continue;
+
+ *cache_size = tcache->cache_attrs.size;
+ return 0;
+ }
+
+ *cache_size = 0;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(hmat_get_extended_linear_cache_size, "CXL");
+
static struct memory_target *acpi_find_genport_target(u32 uid)
{
struct memory_target *target;
@@ -506,6 +545,11 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
case ACPI_HMAT_CA_DIRECT_MAPPED:
tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+ /* Extended Linear mode is only valid if cache is direct mapped */
+ if (cache->address_mode == ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR) {
+ tcache->cache_attrs.address_mode =
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR;
+ }
break;
case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
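hmat_get_extended_linear_cache_size() is documented above as a lookup keyed by the backing resource and node id, returning 0 with a zero size when no extended-linear cache fronts the range. A short kernel-style usage sketch; the function name and calling convention come from this hunk, while report_extended_linear_cache() and its arguments are placeholders, not real CXL driver code:

static int report_extended_linear_cache(struct resource *backing_res, int nid)
{
	resource_size_t cache_size;
	int rc;

	rc = hmat_get_extended_linear_cache_size(backing_res, nid, &cache_size);
	if (rc)
		return rc;        /* e.g. -ENOENT: no HMAT target for this node */

	if (!cache_size)          /* no extended-linear cache in front of this range */
		return 0;

	pr_info("node %d: extended linear cache of %pa bytes\n", nid, &cache_size);
	return 0;
}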
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index ce815d7cb8f6..0a725e46d017 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -18,6 +18,7 @@
#include <linux/nodemask.h>
#include <linux/topology.h>
#include <linux/numa_memblks.h>
+#include <linux/string_choices.h>
static nodemask_t nodes_found_map = NODE_MASK_NONE;
@@ -188,8 +189,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
p->apic_id, p->local_sapic_eid,
p->proximity_domain_lo,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
}
break;
@@ -201,8 +201,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
(unsigned long long)p->base_address,
(unsigned long long)p->length,
p->proximity_domain,
- (p->flags & ACPI_SRAT_MEM_ENABLED) ?
- "enabled" : "disabled",
+ str_enabled_disabled(p->flags & ACPI_SRAT_MEM_ENABLED),
(p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
" hot-pluggable" : "",
(p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
@@ -217,8 +216,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
p->apic_id,
p->proximity_domain,
- (p->flags & ACPI_SRAT_CPU_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
}
break;
@@ -229,8 +227,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
p->acpi_processor_uid,
p->proximity_domain,
- (p->flags & ACPI_SRAT_GICC_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GICC_ENABLED));
}
break;
@@ -248,8 +245,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
*(u16 *)(&p->device_handle[0]),
*(u16 *)(&p->device_handle[2]),
p->proximity_domain,
- (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
} else {
/*
* In this case we can rely on the device having a
@@ -259,8 +255,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
(char *)(&p->device_handle[0]),
(char *)(&p->device_handle[8]),
p->proximity_domain,
- (p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
}
}
break;
@@ -272,8 +267,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
p->acpi_processor_uid,
p->proximity_domain,
- (p->flags & ACPI_SRAT_RINTC_ENABLED) ?
- "enabled" : "disabled");
+ str_enabled_disabled(p->flags & ACPI_SRAT_RINTC_ENABLED));
}
break;
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index 671407fc2bd4..ffbfd32f4cf1 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -245,7 +245,8 @@ static const struct class platform_profile_class = {
/**
* _aggregate_choices - Aggregate the available profile choices
* @dev: The device
- * @arg: struct aggregate_choices_data
+ * @arg: struct aggregate_choices_data, with its aggregate member bitmap
+ * initially filled with ones
*
* Return: 0 on success, -errno on failure
*/
@@ -256,12 +257,10 @@ static int _aggregate_choices(struct device *dev, void *arg)
struct platform_profile_handler *handler;
lockdep_assert_held(&profile_lock);
+
handler = to_pprof_handler(dev);
bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
- if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate))
- bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST);
- else
- bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
+ bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
data->count++;
return 0;
@@ -305,7 +304,6 @@ static ssize_t platform_profile_choices_show(struct kobject *kobj,
};
int err;
- set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
&data, _aggregate_choices);
@@ -422,7 +420,7 @@ static ssize_t platform_profile_store(struct kobject *kobj,
i = sysfs_match_string(profile_names, buf);
if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
- set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
+
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
ret = class_for_each_device(&platform_profile_class, NULL,
&data, _aggregate_choices);
@@ -502,7 +500,6 @@ int platform_profile_cycle(void)
enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
int err;
- set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
&profile, _aggregate_profiles);
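With the PLATFORM_PROFILE_LAST sentinel gone, _aggregate_choices() relies on the caller pre-filling the aggregate bitmap with ones, so AND-ing each handler's choices yields the intersection directly. A small self-contained sketch of that identity-element idea using plain bitmasks (the masks and count are made up):

#include <stdio.h>

#define PROFILE_BITS 0x1fu   /* pretend there are 5 platform profiles */

/* per-handler supported profiles (visible + hidden), as bitmasks */
static const unsigned int handler_choices[] = { 0x0b, 0x1a, 0x1b };

int main(void)
{
	/* start from all ones: the identity element for AND */
	unsigned int aggregate = PROFILE_BITS;

	for (unsigned int i = 0; i < sizeof(handler_choices) / sizeof(handler_choices[0]); i++)
		aggregate &= handler_choices[i];

	printf("profiles every handler supports: 0x%02x\n", aggregate); /* 0x0a */
	return 0;
}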
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 586cc7d1d8aa..b181f7fc2090 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -268,6 +268,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
pr->power.states[ACPI_STATE_C3].address);
+ if (!pr->power.states[ACPI_STATE_C2].address &&
+ !pr->power.states[ACPI_STATE_C3].address)
+ return -ENODEV;
+
return 0;
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b4cd14e7fa76..14c7bac4100b 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -441,6 +441,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook X1404VAP */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"),
+ },
+ },
+ {
/* Asus Vivobook X1504VAP */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 068c1612660b..4ee30c2897a2 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -374,7 +374,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
- ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+ ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+ ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
},
{
/* Medion Lifetab S10346 */
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 0ea653fa3433..cd13ef287011 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -244,12 +244,14 @@ CACHE_ATTR(size, "%llu")
CACHE_ATTR(line_size, "%u")
CACHE_ATTR(indexing, "%u")
CACHE_ATTR(write_policy, "%u")
+CACHE_ATTR(address_mode, "%#x")
static struct attribute *cache_attrs[] = {
&dev_attr_indexing.attr,
&dev_attr_size.attr,
&dev_attr_line_size.attr,
&dev_attr_write_policy.attr,
+ &dev_attr_address_mode.attr,
NULL,
};
ATTRIBUTE_GROUPS(cache);
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index c060da409ed8..2fd05c1bd30b 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -74,13 +74,30 @@
#define UBLK_PARAM_TYPE_ALL \
(UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \
- UBLK_PARAM_TYPE_DMA_ALIGN)
+ UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
struct ublk_rq_data {
struct kref ref;
};
struct ublk_uring_cmd_pdu {
+ /*
+ * Store requests in same batch temporarily for queuing them to
+ * daemon context.
+ *
+ * It would normally be stored in the request payload, but we want to
+ * avoid the extra pre-allocation, and the uring_cmd payload is always
+ * free for us
+ */
+ union {
+ struct request *req;
+ struct request *req_list;
+ };
+
+ /*
+ * The following two are valid for this cmd's whole lifetime, and are
+ * set up in the ublk uring_cmd handler
+ */
struct ublk_queue *ubq;
u16 tag;
};
@@ -141,10 +158,8 @@ struct ublk_queue {
unsigned long flags;
struct task_struct *ubq_daemon;
- char *io_cmd_buf;
+ struct ublksrv_io_desc *io_cmd_buf;
- unsigned long io_addr; /* mapped vm address */
- unsigned int max_io_sz;
bool force_abort;
bool timeout;
bool canceling;
@@ -582,6 +597,18 @@ static int ublk_validate_params(const struct ublk_device *ub)
return -EINVAL;
}
+ if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
+ const struct ublk_param_segment *p = &ub->params.seg;
+
+ if (!is_power_of_2(p->seg_boundary_mask + 1))
+ return -EINVAL;
+
+ if (p->seg_boundary_mask + 1 < UBLK_MIN_SEGMENT_SIZE)
+ return -EINVAL;
+ if (p->max_segment_size < UBLK_MIN_SEGMENT_SIZE)
+ return -EINVAL;
+ }
+
return 0;
}
@@ -598,6 +625,11 @@ static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY);
}
+static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
+{
+ return !ublk_support_user_copy(ubq);
+}
+
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
/*
@@ -674,11 +706,11 @@ static inline bool ublk_rq_has_data(const struct request *rq)
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
int tag)
{
- return (struct ublksrv_io_desc *)
- &(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
+ return &ubq->io_cmd_buf[tag];
}
-static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
+static inline struct ublksrv_io_desc *
+ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
{
return ublk_get_queue(ub, q_id)->io_cmd_buf;
}
@@ -925,7 +957,7 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (ublk_support_user_copy(ubq))
+ if (!ublk_need_map_io(ubq))
return rq_bytes;
/*
@@ -949,7 +981,7 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (ublk_support_user_copy(ubq))
+ if (!ublk_need_map_io(ubq))
return rq_bytes;
if (ublk_need_unmap_req(req)) {
@@ -1037,7 +1069,7 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
struct io_uring_cmd *ioucmd)
{
- return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
+ return io_uring_cmd_to_pdu(ioucmd, struct ublk_uring_cmd_pdu);
}
static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
@@ -1155,14 +1187,11 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
+static void ublk_dispatch_req(struct ublk_queue *ubq,
+ struct request *req,
+ unsigned int issue_flags)
{
- struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- struct ublk_queue *ubq = pdu->ubq;
- int tag = pdu->tag;
- struct request *req = blk_mq_tag_to_rq(
- ubq->dev->tag_set.tags[ubq->q_id], tag);
+ int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
unsigned int mapped_bytes;
@@ -1237,11 +1266,49 @@ static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
+static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ struct ublk_queue *ubq = pdu->ubq;
+
+ ublk_dispatch_req(ubq, pdu->req, issue_flags);
+}
+
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_io *io = &ubq->ios[rq->tag];
+ struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+
+ pdu->req = rq;
+ io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb);
+}
+
+static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ struct request *rq = pdu->req_list;
+ struct ublk_queue *ubq = pdu->ubq;
+ struct request *next;
+
+ do {
+ next = rq->rq_next;
+ rq->rq_next = NULL;
+ ublk_dispatch_req(ubq, rq, issue_flags);
+ rq = next;
+ } while (rq);
+}
+
+static void ublk_queue_cmd_list(struct ublk_queue *ubq, struct rq_list *l)
+{
+ struct request *rq = rq_list_peek(l);
+ struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
+ pdu->req_list = rq;
+ rq_list_init(l);
+ io_uring_cmd_complete_in_task(cmd, ublk_cmd_list_tw_cb);
}
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
@@ -1282,21 +1349,12 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
return BLK_EH_RESET_TIMER;
}
-static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_queue *ubq = hctx->driver_data;
- struct request *rq = bd->rq;
blk_status_t res;
- if (unlikely(ubq->fail_io)) {
+ if (unlikely(ubq->fail_io))
return BLK_STS_TARGET;
- }
-
- /* fill iod to slot in io cmd buffer */
- res = ublk_setup_iod(ubq, rq);
- if (unlikely(res != BLK_STS_OK))
- return BLK_STS_IOERR;
/* With recovery feature enabled, force_abort is set in
* ublk_stop_dev() before calling del_gendisk(). We have to
@@ -1310,17 +1368,68 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
return BLK_STS_IOERR;
+ if (unlikely(ubq->canceling))
+ return BLK_STS_IOERR;
+
+ /* fill iod to slot in io cmd buffer */
+ res = ublk_setup_iod(ubq, rq);
+ if (unlikely(res != BLK_STS_OK))
+ return BLK_STS_IOERR;
+
+ blk_mq_start_request(rq);
+ return BLK_STS_OK;
+}
+
+static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct ublk_queue *ubq = hctx->driver_data;
+ struct request *rq = bd->rq;
+ blk_status_t res;
+
+ res = ublk_prep_req(ubq, rq);
+ if (res != BLK_STS_OK)
+ return res;
+
+ /*
+ * ->canceling has to be handled after ->force_abort and ->fail_io
+ * are dealt with, otherwise this request may not be failed during
+ * recovery, causing a hang when deleting the disk
+ */
if (unlikely(ubq->canceling)) {
__ublk_abort_rq(ubq, rq);
return BLK_STS_OK;
}
- blk_mq_start_request(bd->rq);
ublk_queue_cmd(ubq, rq);
-
return BLK_STS_OK;
}
+static void ublk_queue_rqs(struct rq_list *rqlist)
+{
+ struct rq_list requeue_list = { };
+ struct rq_list submit_list = { };
+ struct ublk_queue *ubq = NULL;
+ struct request *req;
+
+ while ((req = rq_list_pop(rqlist))) {
+ struct ublk_queue *this_q = req->mq_hctx->driver_data;
+
+ if (ubq && ubq != this_q && !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(ubq, &submit_list);
+ ubq = this_q;
+
+ if (ublk_prep_req(ubq, req) == BLK_STS_OK)
+ rq_list_add_tail(&submit_list, req);
+ else
+ rq_list_add_tail(&requeue_list, req);
+ }
+
+ if (ubq && !rq_list_empty(&submit_list))
+ ublk_queue_cmd_list(ubq, &submit_list);
+ *rqlist = requeue_list;
+}
+
static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
unsigned int hctx_idx)
{
@@ -1333,6 +1442,7 @@ static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
static const struct blk_mq_ops ublk_mq_ops = {
.queue_rq = ublk_queue_rq,
+ .queue_rqs = ublk_queue_rqs,
.init_hctx = ublk_init_hctx,
.timeout = ublk_timeout,
};
@@ -1446,17 +1556,27 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
}
}
+/* Must be called when queue is frozen */
+static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
+{
+ bool canceled;
+
+ spin_lock(&ubq->cancel_lock);
+ canceled = ubq->canceling;
+ if (!canceled)
+ ubq->canceling = true;
+ spin_unlock(&ubq->cancel_lock);
+
+ return canceled;
+}
+
static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
{
+ bool was_canceled = ubq->canceling;
struct gendisk *disk;
- spin_lock(&ubq->cancel_lock);
- if (ubq->canceling) {
- spin_unlock(&ubq->cancel_lock);
+ if (was_canceled)
return false;
- }
- ubq->canceling = true;
- spin_unlock(&ubq->cancel_lock);
spin_lock(&ub->lock);
disk = ub->ub_disk;
@@ -1468,14 +1588,23 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
if (!disk)
return false;
- /* Now we are serialized with ublk_queue_rq() */
+ /*
+ * Now we are serialized with ublk_queue_rq()
+ *
+ * Make sure that ubq->canceling is set while the queue is frozen,
+ * because ublk_queue_rq() relies on this flag to avoid touching a
+ * completed uring_cmd
+ */
blk_mq_quiesce_queue(disk->queue);
- /* abort queue is for making forward progress */
- ublk_abort_queue(ub, ubq);
+ was_canceled = ublk_mark_queue_canceling(ubq);
+ if (!was_canceled) {
+ /* abort queue is for making forward progress */
+ ublk_abort_queue(ub, ubq);
+ }
blk_mq_unquiesce_queue(disk->queue);
put_device(disk_to_dev(disk));
- return true;
+ return !was_canceled;
}
static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
@@ -1845,7 +1974,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
goto out;
- if (!ublk_support_user_copy(ubq)) {
+ if (ublk_need_map_io(ubq)) {
/*
* FETCH_RQ has to provide IO buffer if NEED GET
* DATA is not enabled
@@ -1867,7 +1996,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
- if (!ublk_support_user_copy(ubq)) {
+ if (ublk_need_map_io(ubq)) {
/*
* COMMIT_AND_FETCH_REQ has to provide IO buffer if
* NEED GET DATA is not enabled or it is Read IO.
@@ -2343,6 +2472,12 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN)
lim.dma_alignment = ub->params.dma.alignment;
+ if (ub->params.types & UBLK_PARAM_TYPE_SEGMENT) {
+ lim.seg_boundary_mask = ub->params.seg.seg_boundary_mask;
+ lim.max_segment_size = ub->params.seg.max_segment_size;
+ lim.max_segments = ub->params.seg.max_segments;
+ }
+
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
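Editorial note: the new ublk_queue_rqs() path batches requests per ublk queue and hands each batch to a single task-work callback (ublk_queue_cmd_list()), flushing the accumulated list whenever the owning queue changes. A minimal, self-contained userspace sketch of that "flush when the queue changes" pattern follows; the struct, array, and flush_batch() names are hypothetical stand-ins, not ublk APIs.

/* Illustrative sketch of per-queue batching as done by ublk_queue_rqs();
 * all types and names below are made up for illustration. */
#include <stdio.h>

struct fake_req { int qid; int tag; };

/* Stand-in for ublk_queue_cmd_list(): dispatch one batch for one queue */
static void flush_batch(int qid, const int *tags, int n)
{
	printf("queue %d: dispatch %d request(s):", qid, n);
	for (int i = 0; i < n; i++)
		printf(" %d", tags[i]);
	printf("\n");
}

int main(void)
{
	/* Requests may arrive interleaved across queues */
	struct fake_req reqs[] = {
		{ 0, 1 }, { 0, 2 }, { 1, 7 }, { 1, 8 }, { 0, 3 },
	};
	int batch[8], n = 0, cur_q = -1;

	for (unsigned int i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		/* Queue changed: flush what was collected so far */
		if (cur_q != -1 && cur_q != reqs[i].qid && n) {
			flush_batch(cur_q, batch, n);
			n = 0;
		}
		cur_q = reqs[i].qid;
		batch[n++] = reqs[i].tag;
	}
	if (cur_q != -1 && n)
		flush_batch(cur_q, batch, n);
	return 0;
}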
diff --git a/drivers/bus/fsl-mc/dpmcp.c b/drivers/bus/fsl-mc/dpmcp.c
index 5fbd0dbde24a..7816c0a728ef 100644
--- a/drivers/bus/fsl-mc/dpmcp.c
+++ b/drivers/bus/fsl-mc/dpmcp.c
@@ -75,25 +75,3 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-
-/**
- * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
- cmd_flags, token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index b5e8c021fa1f..6c3beb82dd1b 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -656,8 +656,3 @@ int __init fsl_mc_allocator_driver_init(void)
{
return fsl_mc_driver_register(&fsl_mc_allocator_driver);
}
-
-void fsl_mc_allocator_driver_exit(void)
-{
- fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
-}
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index b3520ea1b9f4..e1b7ec3ed1a7 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -66,10 +66,6 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
/*
* Data Path Resource Container (DPRC) API
*/
@@ -631,8 +627,6 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
int __init fsl_mc_allocator_driver_init(void);
-void fsl_mc_allocator_driver_exit(void);
-
void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 95b10a6cf307..a0ad7866cbfc 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -263,23 +263,3 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
dpmcp_dev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
-
-/**
- * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
- *
- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
- */
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
-{
- int error;
- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-
- error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
- if (error < 0) {
- dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
- return error;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 2f83fb97c6fb..e0bede6350e1 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -153,7 +153,7 @@ struct clk_lookup_alloc {
char con_id[MAX_CON_ID];
};
-static struct clk_lookup * __ref
+static __printf(3, 0) struct clk_lookup * __ref
vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
@@ -215,7 +215,7 @@ fail:
return &cla->cl;
}
-static struct clk_lookup *
+static __printf(3, 0) struct clk_lookup *
vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
va_list ap)
{
@@ -303,9 +303,8 @@ void clkdev_drop(struct clk_lookup *cl)
}
EXPORT_SYMBOL(clkdev_drop);
-static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
- const char *con_id,
- const char *dev_id, ...)
+static __printf(3, 4) struct clk_lookup *
+__clk_register_clkdev(struct clk_hw *hw, const char *con_id, const char *dev_id, ...)
{
struct clk_lookup *cl;
va_list ap;
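Editorial note: the clkdev.c hunks only annotate existing helpers with __printf() so the compiler can type-check dev_fmt against its arguments. A hedged userspace approximation of the underlying format attribute (which the kernel's __printf() macro wraps) is sketched below; the function names are illustrative only.

/* Sketch: format(printf, fmt_idx, first_arg_idx) lets the compiler check
 * callers; first_arg_idx == 0 is used for va_list-style helpers, mirroring
 * the __printf(3, 0) annotations on vclkdev_alloc()/vclkdev_create(). */
#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 1, 0)))
static void vlog(const char *fmt, va_list ap)
{
	vprintf(fmt, ap);
}

__attribute__((format(printf, 1, 2)))
static void log_msg(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vlog(fmt, ap);
	va_end(ap);
}

int main(void)
{
	log_msg("value: %d\n", 42);
	/* log_msg("value: %d\n", "oops"); -- would trigger -Wformat */
	return 0;
}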
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 00a463b044b5..1de3c50b9804 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -519,6 +519,25 @@ static int mchp_tc_probe(struct platform_device *pdev)
channel);
}
+ /* Disable Quadrature Decoder and position measure */
+ ret = regmap_update_bits(regmap, ATMEL_TC_BMR, ATMEL_TC_QDEN | ATMEL_TC_POSEN, 0);
+ if (ret)
+ return ret;
+
+ /* Setup the period capture mode */
+ ret = regmap_update_bits(regmap, ATMEL_TC_REG(priv->channel[0], CMR),
+ ATMEL_TC_WAVE | ATMEL_TC_ABETRG | ATMEL_TC_CMR_MASK |
+ ATMEL_TC_TCCLKS,
+ ATMEL_TC_CMR_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable clock and trigger counter */
+ ret = regmap_write(regmap, ATMEL_TC_REG(priv->channel[0], CCR),
+ ATMEL_TC_CLKEN | ATMEL_TC_SWTRG);
+ if (ret)
+ return ret;
+
priv->tc_cfg = tcb_config;
priv->regmap = regmap;
counter->name = dev_name(&pdev->dev);
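Editorial note: the probe additions above rely on regmap_update_bits(), i.e. a read-modify-write that replaces only the bits covered by the mask. A standalone sketch of that semantic (not the regmap API itself; register value and bit positions below are arbitrary):

/* Sketch of the read-modify-write semantic behind regmap_update_bits():
 * only bits set in @mask are replaced by the corresponding bits of @val. */
#include <stdint.h>
#include <stdio.h>

static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t bmr = 0xffffffff;

	/* Clear two example mode bits (positions chosen arbitrarily here)
	 * while leaving the rest of the register untouched. */
	bmr = update_bits(bmr, (1u << 8) | (1u << 9), 0);
	printf("register after clearing two bits: %#x\n", bmr);
	return 0;
}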
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index cf73f65baf60..b249c8647639 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -58,37 +58,43 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
return 0;
}
+ ret = clk_enable(priv->clk);
+ if (ret)
+ goto disable_cnt;
+
/* LP timer must be enabled before writing CMP & ARR */
ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->ceiling);
if (ret)
- return ret;
+ goto disable_clk;
ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0);
if (ret)
- return ret;
+ goto disable_clk;
/* ensure CMP & ARR registers are properly written */
ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret)
- return ret;
+ goto disable_clk;
ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
STM32_LPTIM_CMPOKCF_ARROKCF);
if (ret)
- return ret;
+ goto disable_clk;
- ret = clk_enable(priv->clk);
- if (ret) {
- regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
- return ret;
- }
priv->enabled = true;
/* Start LP timer in continuous mode */
return regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
+
+disable_clk:
+ clk_disable(priv->clk);
+disable_cnt:
+ regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+
+ return ret;
}
static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
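Editorial note: the stm32-lptimer change enables the clock before the CMP/ARR programming and, on any later failure, rolls back both the clock and the CR enable in reverse order. The general goto-unwind idiom, sketched standalone (resource names are placeholders):

/* Sketch of the goto-unwind idiom: resources acquired in order A then B
 * are released in reverse order on failure. */
#include <stdio.h>

static int enable_counter(void)  { puts("counter on");   return 0; }
static void disable_counter(void){ puts("counter off"); }
static int enable_clock(void)    { puts("clock on");     return 0; }
static void disable_clock(void)  { puts("clock off"); }
static int program_regs(void)    { puts("program regs"); return -1; /* simulate failure */ }

static int setup(void)
{
	int ret;

	ret = enable_counter();
	if (ret)
		return ret;

	ret = enable_clock();
	if (ret)
		goto disable_cnt;

	ret = program_regs();
	if (ret)
		goto disable_clk;

	return 0;

disable_clk:
	disable_clock();
disable_cnt:
	disable_counter();
	return ret;
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}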
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0cf5a320bb5e..3841c9da6cac 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2809,6 +2809,12 @@ EXPORT_SYMBOL(cpufreq_update_policy);
*/
void cpufreq_update_limits(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy);
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
+
if (cpufreq_driver->update_limits)
cpufreq_driver->update_limits(cpu);
else
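Editorial note: cpufreq_update_limits() now pins the policy with the scope-based __free(put_cpufreq_policy) helper, so the reference is dropped automatically on every return path; the guard(rwsem_*) conversions later in this series use the same mechanism. The kernel macros build on the compiler's cleanup attribute; a rough userspace approximation (names here are illustrative, not the kernel API):

/* Rough userspace analogue of the kernel's __free()/DEFINE_FREE() scope-
 * based cleanup: the cleanup function runs when the variable goes out of
 * scope, on every return path. */
#include <stdio.h>
#include <stdlib.h>

static void free_buf(char **p)
{
	if (*p) {
		puts("releasing buffer");
		free(*p);
	}
}

static int use_buffer(int fail_early)
{
	char *buf __attribute__((cleanup(free_buf))) = malloc(64);

	if (!buf)
		return -1;
	if (fail_early)
		return -2;	/* buf is still freed automatically */

	puts("using buffer");
	return 0;		/* ...and freed here too */
}

int main(void)
{
	printf("early failure: %d\n", use_buffer(1));
	printf("normal path:   %d\n", use_buffer(0));
	return 0;
}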
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 8ac1e9d70eeb..cf1ba673b8c2 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -158,4 +158,8 @@ config CXL_REGION_INVALIDATION_TEST
If unsure, or if this kernel is meant for production environments,
say N.
+config CXL_MCE
+ def_bool y
+ depends on X86_MCE && MEMORY_FAILURE
+
endif
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index b0bfbd9eac9b..086df97a0fcf 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -14,6 +14,9 @@ cxl_core-y += pci.o
cxl_core-y += hdm.o
cxl_core-y += pmu.o
cxl_core-y += cdat.o
+cxl_core-y += ras.o
+cxl_core-y += acpi.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
+cxl_core-$(CONFIG_CXL_MCE) += mce.o
cxl_core-$(CONFIG_CXL_FEATURES) += features.o
diff --git a/drivers/cxl/core/acpi.c b/drivers/cxl/core/acpi.c
new file mode 100644
index 000000000000..f13b4dae6ac5
--- /dev/null
+++ b/drivers/cxl/core/acpi.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#include <linux/acpi.h>
+#include "cxl.h"
+#include "core.h"
+
+int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *size)
+{
+ return hmat_get_extended_linear_cache_size(backing_res, nid, size);
+}
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 8153f8d83a16..edb4f41eeacc 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -258,27 +258,29 @@ static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
struct xarray *dsmas_xa)
{
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct device *dev = cxlds->dev;
- struct range pmem_range = {
- .start = cxlds->pmem_res.start,
- .end = cxlds->pmem_res.end,
- };
- struct range ram_range = {
- .start = cxlds->ram_res.start,
- .end = cxlds->ram_res.end,
- };
struct dsmas_entry *dent;
unsigned long index;
xa_for_each(dsmas_xa, index, dent) {
- if (resource_size(&cxlds->ram_res) &&
- range_contains(&ram_range, &dent->dpa_range))
- update_perf_entry(dev, dent, &mds->ram_perf);
- else if (resource_size(&cxlds->pmem_res) &&
- range_contains(&pmem_range, &dent->dpa_range))
- update_perf_entry(dev, dent, &mds->pmem_perf);
- else
+ bool found = false;
+
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct resource *res = &cxlds->part[i].res;
+ struct range range = {
+ .start = res->start,
+ .end = res->end,
+ };
+
+ if (range_contains(&range, &dent->dpa_range)) {
+ update_perf_entry(dev, dent,
+ &cxlds->part[i].perf);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
dev_dbg(dev, "no partition for dsmas dpa: %pra\n",
&dent->dpa_range);
}
@@ -343,36 +345,46 @@ static int match_cxlrd_hb(struct device *dev, void *data)
return 0;
}
-static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
+static void cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct cxl_port *root_port;
- int rc;
struct cxl_root *cxl_root __free(put_cxl_root) =
find_cxl_root(cxlmd->endpoint);
+ /*
+ * No need to reset_dpa_perf() here as find_cxl_root() is guaranteed to
+ * succeed when called in the cxl_endpoint_port_probe() path.
+ */
if (!cxl_root)
- return -ENODEV;
+ return;
root_port = &cxl_root->port;
- /* Check that the QTG IDs are all sane between end device and root decoders */
- if (!cxl_qos_match(root_port, &mds->ram_perf))
- reset_dpa_perf(&mds->ram_perf);
- if (!cxl_qos_match(root_port, &mds->pmem_perf))
- reset_dpa_perf(&mds->pmem_perf);
-
- /* Check to make sure that the device's host bridge is under a root decoder */
- rc = device_for_each_child(&root_port->dev,
- cxlmd->endpoint->host_bridge, match_cxlrd_hb);
- if (!rc) {
- reset_dpa_perf(&mds->ram_perf);
- reset_dpa_perf(&mds->pmem_perf);
+ /*
+ * Save userspace from needing to check if a qos class has any matches
+ * by hiding qos class info if the memdev is not mapped by a root
+ * decoder, or the partition class does not match any root decoder
+ * class.
+ */
+ if (!device_for_each_child(&root_port->dev,
+ cxlmd->endpoint->host_bridge,
+ match_cxlrd_hb)) {
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
+
+ reset_dpa_perf(perf);
+ }
+ return;
}
- return rc;
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
+
+ if (!cxl_qos_match(root_port, perf))
+ reset_dpa_perf(perf);
+ }
}
static void discard_dsmas(struct xarray *xa)
@@ -570,23 +582,18 @@ static bool dpa_perf_contains(struct cxl_dpa_perf *perf,
return range_contains(&perf->dpa_range, &dpa);
}
-static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled,
- enum cxl_decoder_mode mode)
+static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_dpa_perf *perf;
- switch (mode) {
- case CXL_DECODER_RAM:
- perf = &mds->ram_perf;
- break;
- case CXL_DECODER_PMEM:
- perf = &mds->pmem_perf;
- break;
- default:
+ if (cxled->part < 0)
+ return ERR_PTR(-EINVAL);
+ perf = &cxlds->part[cxled->part].perf;
+
+ if (!perf)
return ERR_PTR(-EINVAL);
- }
if (!dpa_perf_contains(perf, cxled->dpa_res))
return ERR_PTR(-EINVAL);
@@ -647,11 +654,10 @@ static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr,
if (cxlds->rcd)
return -ENODEV;
- perf = cxled_get_dpa_perf(cxled, cxlr->mode);
+ perf = cxled_get_dpa_perf(cxled);
if (IS_ERR(perf))
return PTR_ERR(perf);
- gp_port = to_cxl_port(parent_port->dev.parent);
*gp_is_root = is_cxl_root(gp_port);
/*
@@ -1053,7 +1059,7 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
lockdep_assert_held(&cxl_dpa_rwsem);
- perf = cxled_get_dpa_perf(cxled, cxlr->mode);
+ perf = cxled_get_dpa_perf(cxled);
if (IS_ERR(perf))
return;
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 17e99a25c29a..15699299dc11 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -74,8 +74,8 @@ void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
resource_size_t length);
struct dentry *cxl_debugfs_create_dir(const char *dir);
-int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
- enum cxl_decoder_mode mode);
+int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
+ enum cxl_partition_mode mode);
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
@@ -117,6 +117,12 @@ bool cxl_need_node_perf_attrs_update(int nid);
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c);
+int cxl_ras_init(void);
+void cxl_ras_exit(void);
+int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port);
+int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *size);
+
#ifdef CONFIG_CXL_FEATURES
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
enum cxl_get_feat_selection selection,
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 50e6a45b30ba..70cae4ebf8a4 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -213,16 +213,46 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
struct resource *p1, *p2;
- down_read(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_dpa_rwsem);
for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
__cxl_dpa_debug(file, p1, 0);
for (p2 = p1->child; p2; p2 = p2->sibling)
__cxl_dpa_debug(file, p2, 1);
}
- up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL");
+/* See request_skip() kernel-doc */
+static resource_size_t __adjust_skip(struct cxl_dev_state *cxlds,
+ const resource_size_t skip_base,
+ const resource_size_t skip_len,
+ const char *requester)
+{
+ const resource_size_t skip_end = skip_base + skip_len - 1;
+
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ const struct resource *part_res = &cxlds->part[i].res;
+ resource_size_t adjust_start, adjust_end, size;
+
+ adjust_start = max(skip_base, part_res->start);
+ adjust_end = min(skip_end, part_res->end);
+
+ if (adjust_end < adjust_start)
+ continue;
+
+ size = adjust_end - adjust_start + 1;
+
+ if (!requester)
+ __release_region(&cxlds->dpa_res, adjust_start, size);
+ else if (!__request_region(&cxlds->dpa_res, adjust_start, size,
+ requester, 0))
+ return adjust_start - skip_base;
+ }
+
+ return skip_len;
+}
+#define release_skip(c, b, l) __adjust_skip((c), (b), (l), NULL)
+
/*
* Must be called in a context that synchronizes against this decoder's
* port ->remove() callback (like an endpoint decoder sysfs attribute)
@@ -241,7 +271,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
skip_start = res->start - cxled->skip;
__release_region(&cxlds->dpa_res, res->start, resource_size(res));
if (cxled->skip)
- __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
+ release_skip(cxlds, skip_start, cxled->skip);
cxled->skip = 0;
cxled->dpa_res = NULL;
put_device(&cxled->cxld.dev);
@@ -250,9 +280,8 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
static void cxl_dpa_release(void *cxled)
{
- down_write(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_dpa_rwsem);
__cxl_dpa_release(cxled);
- up_write(&cxl_dpa_rwsem);
}
/*
@@ -268,6 +297,58 @@ static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
__cxl_dpa_release(cxled);
}
+/**
+ * request_skip() - Track DPA 'skip' in @cxlds->dpa_res resource tree
+ * @cxlds: CXL.mem device context that parents @cxled
+ * @cxled: Endpoint decoder establishing new allocation that skips lower DPA
+ * @skip_base: DPA < start of new DPA allocation (DPAnew)
+ * @skip_len: @skip_base + @skip_len == DPAnew
+ *
+ * DPA 'skip' arises from out-of-sequence DPA allocation events relative
+ * to free capacity across multiple partitions. It is a wasteful event
+ * as usable DPA gets thrown away, but if a deployment has, for example,
+ * a dual RAM+PMEM device, wants to use PMEM, and has unallocated RAM
+ * DPA, the free RAM DPA must be sacrificed to start allocating PMEM.
+ * See third "Implementation Note" in CXL 3.1 8.2.4.19.13 "Decoder
+ * Protection" for more details.
+ *
+ * A 'skip' always covers the last allocated DPA in a previous partition
+ * to the start of the current partition to allocate. Allocations never
+ * start in the middle of a partition, and allocations are always
+ * de-allocated in reverse order (see cxl_dpa_free(), or natural devm
+ * unwind order from forced in-order allocation).
+ *
+ * If @cxlds->nr_partitions was guaranteed to be <= 2 then the 'skip'
+ * would always be contained to a single partition. Given
+ * @cxlds->nr_partitions may be > 2 it results in cases where the 'skip'
+ * might span "tail capacity of partition[0], all of partition[1], ...,
+ * all of partition[N-1]" to support allocating from partition[N]. That
+ * in turn interacts with the partition 'struct resource' boundaries
+ * within @cxlds->dpa_res whereby 'skip' requests need to be divided by
+ * partition. I.e. this is a quirk of using a 'struct resource' tree to
+ * detect range conflicts while also tracking partition boundaries in
+ * @cxlds->dpa_res.
+ */
+static int request_skip(struct cxl_dev_state *cxlds,
+ struct cxl_endpoint_decoder *cxled,
+ const resource_size_t skip_base,
+ const resource_size_t skip_len)
+{
+ resource_size_t skipped = __adjust_skip(cxlds, skip_base, skip_len,
+ dev_name(&cxled->cxld.dev));
+
+ if (skipped == skip_len)
+ return 0;
+
+ dev_dbg(cxlds->dev,
+ "%s: failed to reserve skipped space (%pa %pa %pa)\n",
+ dev_name(&cxled->cxld.dev), &skip_base, &skip_len, &skipped);
+
+ release_skip(cxlds, skip_base, skipped);
+
+ return -EBUSY;
+}
+
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped)
@@ -277,6 +358,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &port->dev;
struct resource *res;
+ int rc;
lockdep_assert_held_write(&cxl_dpa_rwsem);
@@ -305,14 +387,9 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
}
if (skipped) {
- res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
- dev_name(&cxled->cxld.dev), 0);
- if (!res) {
- dev_dbg(dev,
- "decoder%d.%d: failed to reserve skipped space\n",
- port->id, cxled->cxld.id);
- return -EBUSY;
- }
+ rc = request_skip(cxlds, cxled, base - skipped, skipped);
+ if (rc)
+ return rc;
}
res = __request_region(&cxlds->dpa_res, base, len,
dev_name(&cxled->cxld.dev), 0);
@@ -320,28 +397,117 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
port->id, cxled->cxld.id);
if (skipped)
- __release_region(&cxlds->dpa_res, base - skipped,
- skipped);
+ release_skip(cxlds, base - skipped, skipped);
return -EBUSY;
}
cxled->dpa_res = res;
cxled->skip = skipped;
- if (resource_contains(&cxlds->pmem_res, res))
- cxled->mode = CXL_DECODER_PMEM;
- else if (resource_contains(&cxlds->ram_res, res))
- cxled->mode = CXL_DECODER_RAM;
- else {
- dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n",
- port->id, cxled->cxld.id, cxled->dpa_res);
- cxled->mode = CXL_DECODER_MIXED;
- }
+ /*
+ * When allocating new capacity, ->part is already set, when
+ * discovering decoder settings at initial enumeration, ->part
+ * is not set.
+ */
+ if (cxled->part < 0)
+ for (int i = 0; i < cxlds->nr_partitions; i++)
+ if (resource_contains(&cxlds->part[i].res, res)) {
+ cxled->part = i;
+ break;
+ }
+
+ if (cxled->part < 0)
+ dev_warn(dev, "decoder%d.%d: %pr does not map any partition\n",
+ port->id, cxled->cxld.id, res);
port->hdm_end++;
get_device(&cxled->cxld.dev);
return 0;
}
+static int add_dpa_res(struct device *dev, struct resource *parent,
+ struct resource *res, resource_size_t start,
+ resource_size_t size, const char *type)
+{
+ int rc;
+
+ *res = (struct resource) {
+ .name = type,
+ .start = start,
+ .end = start + size - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ if (resource_size(res) == 0) {
+ dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
+ return 0;
+ }
+ rc = request_resource(parent, res);
+ if (rc) {
+ dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
+ res, rc);
+ return rc;
+ }
+
+ dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+
+ return 0;
+}
+
+static const char *cxl_mode_name(enum cxl_partition_mode mode)
+{
+ switch (mode) {
+ case CXL_PARTMODE_RAM:
+ return "ram";
+ case CXL_PARTMODE_PMEM:
+ return "pmem";
+ default:
+ return "";
+ }
+}
+
+/* if this fails the caller must destroy @cxlds, there is no recovery */
+int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
+{
+ struct device *dev = cxlds->dev;
+
+ guard(rwsem_write)(&cxl_dpa_rwsem);
+
+ if (cxlds->nr_partitions)
+ return -EBUSY;
+
+ if (!info->size || !info->nr_partitions) {
+ cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
+ cxlds->nr_partitions = 0;
+ return 0;
+ }
+
+ cxlds->dpa_res = DEFINE_RES_MEM(0, info->size);
+
+ for (int i = 0; i < info->nr_partitions; i++) {
+ const struct cxl_dpa_part_info *part = &info->part[i];
+ int rc;
+
+ cxlds->part[i].perf.qos_class = CXL_QOS_CLASS_INVALID;
+ cxlds->part[i].mode = part->mode;
+
+ /* Require ordered + contiguous partitions */
+ if (i) {
+ const struct cxl_dpa_part_info *prev = &info->part[i - 1];
+
+ if (prev->range.end + 1 != part->range.start)
+ return -EINVAL;
+ }
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->part[i].res,
+ part->range.start, range_len(&part->range),
+ cxl_mode_name(part->mode));
+ if (rc)
+ return rc;
+ cxlds->nr_partitions++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_dpa_setup);
+
int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped)
@@ -362,14 +528,11 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- resource_size_t size = 0;
-
- down_read(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_dpa_rwsem);
if (cxled->dpa_res)
- size = resource_size(cxled->dpa_res);
- up_read(&cxl_dpa_rwsem);
+ return resource_size(cxled->dpa_res);
- return size;
+ return 0;
}
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
@@ -387,151 +550,136 @@ int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
struct device *dev = &cxled->cxld.dev;
- int rc;
- down_write(&cxl_dpa_rwsem);
- if (!cxled->dpa_res) {
- rc = 0;
- goto out;
- }
+ guard(rwsem_write)(&cxl_dpa_rwsem);
+ if (!cxled->dpa_res)
+ return 0;
if (cxled->cxld.region) {
dev_dbg(dev, "decoder assigned to: %s\n",
dev_name(&cxled->cxld.region->dev));
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
dev_dbg(dev, "decoder enabled\n");
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.id != port->hdm_end) {
dev_dbg(dev, "expected decoder%d.%d\n", port->id,
port->hdm_end);
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
+
devm_cxl_dpa_release(cxled);
- rc = 0;
-out:
- up_write(&cxl_dpa_rwsem);
- return rc;
+ return 0;
}
-int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
- enum cxl_decoder_mode mode)
+int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
+ enum cxl_partition_mode mode)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxled->cxld.dev;
-
- switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
- break;
- default:
- dev_dbg(dev, "unsupported mode: %d\n", mode);
- return -EINVAL;
- }
+ int part;
guard(rwsem_write)(&cxl_dpa_rwsem);
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
return -EBUSY;
- /*
- * Only allow modes that are supported by the current partition
- * configuration
- */
- if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
- dev_dbg(dev, "no available pmem capacity\n");
- return -ENXIO;
+ for (part = 0; part < cxlds->nr_partitions; part++)
+ if (cxlds->part[part].mode == mode)
+ break;
+
+ if (part >= cxlds->nr_partitions) {
+ dev_dbg(dev, "unsupported mode: %d\n", mode);
+ return -EINVAL;
}
- if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
- dev_dbg(dev, "no available ram capacity\n");
+
+ if (!resource_size(&cxlds->part[part].res)) {
+ dev_dbg(dev, "no available capacity for mode: %d\n", mode);
return -ENXIO;
}
- cxled->mode = mode;
+ cxled->part = part;
return 0;
}
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- resource_size_t free_ram_start, free_pmem_start;
- struct cxl_port *port = cxled_to_port(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxled->cxld.dev;
- resource_size_t start, avail, skip;
+ struct resource *res, *prev = NULL;
+ resource_size_t start, avail, skip, skip_start;
struct resource *p, *last;
- int rc;
+ int part;
- down_write(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_dpa_rwsem);
if (cxled->cxld.region) {
dev_dbg(dev, "decoder attached to %s\n",
dev_name(&cxled->cxld.region->dev));
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
dev_dbg(dev, "decoder enabled\n");
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
- for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
- last = p;
- if (last)
- free_ram_start = last->end + 1;
- else
- free_ram_start = cxlds->ram_res.start;
+ part = cxled->part;
+ if (part < 0) {
+ dev_dbg(dev, "partition not set\n");
+ return -EBUSY;
+ }
- for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
+ res = &cxlds->part[part].res;
+ for (p = res->child, last = NULL; p; p = p->sibling)
last = p;
if (last)
- free_pmem_start = last->end + 1;
+ start = last->end + 1;
else
- free_pmem_start = cxlds->pmem_res.start;
-
- if (cxled->mode == CXL_DECODER_RAM) {
- start = free_ram_start;
- avail = cxlds->ram_res.end - start + 1;
- skip = 0;
- } else if (cxled->mode == CXL_DECODER_PMEM) {
- resource_size_t skip_start, skip_end;
-
- start = free_pmem_start;
- avail = cxlds->pmem_res.end - start + 1;
- skip_start = free_ram_start;
+ start = res->start;
- /*
- * If some pmem is already allocated, then that allocation
- * already handled the skip.
- */
- if (cxlds->pmem_res.child &&
- skip_start == cxlds->pmem_res.child->start)
- skip_end = skip_start - 1;
- else
- skip_end = start - 1;
- skip = skip_end - skip_start + 1;
- } else {
- dev_dbg(dev, "mode not set\n");
- rc = -EINVAL;
- goto out;
+ /*
+ * To allocate at partition N, a skip needs to be calculated for all
+ * unallocated space at lower partition indices.
+ *
+ * If a partition has any allocations, the search can end because a
+ * previous cxl_dpa_alloc() invocation is assumed to have accounted for
+ * all previous partitions.
+ */
+ skip_start = CXL_RESOURCE_NONE;
+ for (int i = part; i; i--) {
+ prev = &cxlds->part[i - 1].res;
+ for (p = prev->child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last) {
+ skip_start = last->end + 1;
+ break;
+ }
+ skip_start = prev->start;
}
+ avail = res->end - start + 1;
+ if (skip_start == CXL_RESOURCE_NONE)
+ skip = 0;
+ else
+ skip = res->start - skip_start;
+
if (size > avail) {
dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- cxl_decoder_mode_name(cxled->mode), &avail);
- rc = -ENOSPC;
- goto out;
+ res->name, &avail);
+ return -ENOSPC;
}
- rc = __cxl_dpa_reserve(cxled, start, size, skip);
-out:
- up_write(&cxl_dpa_rwsem);
+ return __cxl_dpa_reserve(cxled, start, size, skip);
+}
+
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+ int rc;
+ rc = __cxl_dpa_alloc(cxled, size);
if (rc)
return rc;
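Editorial note: the hdm.c rework generalizes the old RAM/PMEM skip handling to N partitions. A skip always runs from the end of the last allocation in a lower partition up to the start of the target partition, and __adjust_skip() divides that span along partition boundaries so each piece is reserved in the right part of the resource tree. A self-contained arithmetic sketch of that splitting (the partition layout and addresses are an arbitrary example):

/* Sketch of how a DPA "skip" spanning several partitions is split along
 * partition boundaries, mirroring __adjust_skip(). Layout is made up. */
#include <stdio.h>
#include <stdint.h>

struct part { const char *name; uint64_t start, end; }; /* inclusive */

int main(void)
{
	const struct part parts[] = {
		{ "ram",  0x00000, 0x0ffff },
		{ "pmem", 0x10000, 0x2ffff },
	};
	/* Allocating in "pmem" at 0x18000 with unallocated ram from 0x8000
	 * onward gives skip_base = 0x8000 and skip_len = 0x10000 (up to the
	 * 0x18000 allocation start). */
	uint64_t skip_base = 0x8000, skip_len = 0x10000;
	uint64_t skip_end = skip_base + skip_len - 1;

	for (unsigned int i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		uint64_t lo = skip_base > parts[i].start ? skip_base : parts[i].start;
		uint64_t hi = skip_end < parts[i].end ? skip_end : parts[i].end;

		if (hi < lo)
			continue;	/* no overlap with this partition */
		printf("skip covers %s: [%#llx, %#llx] (%llu bytes)\n",
		       parts[i].name,
		       (unsigned long long)lo, (unsigned long long)hi,
		       (unsigned long long)(hi - lo + 1));
	}
	return 0;
}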
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 78c5346e3e89..d72764056ce6 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -11,6 +11,7 @@
#include "core.h"
#include "trace.h"
+#include "mce.h"
static bool cxl_raw_allow_all;
@@ -900,7 +901,7 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
}
if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
- u64 dpa, hpa = ULLONG_MAX;
+ u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
struct cxl_region *cxlr;
/*
@@ -913,14 +914,20 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
cxlr = cxl_dpa_to_region(cxlmd, dpa);
- if (cxlr)
+ if (cxlr) {
+ u64 cache_size = cxlr->params.cache_size;
+
hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
+ if (cache_size)
+ hpa_alias = hpa - cache_size;
+ }
if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
trace_cxl_general_media(cxlmd, type, cxlr, hpa,
- &evt->gen_media);
+ hpa_alias, &evt->gen_media);
else if (event_type == CXL_CPER_EVENT_DRAM)
- trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram);
+ trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
+ &evt->dram);
}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
@@ -1126,10 +1133,6 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
mds->active_persistent_bytes =
le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
- mds->next_volatile_bytes =
- le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- mds->next_persistent_bytes =
- le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
return 0;
}
@@ -1251,74 +1254,54 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_port *endpoint;
- int rc;
/* synchronize with cxl_mem_probe() and decoder write operations */
guard(device)(&cxlmd->dev);
endpoint = cxlmd->endpoint;
- down_read(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_region_rwsem);
/*
* Require an endpoint to be safe otherwise the driver can not
* be sure that the device is unmapped.
*/
if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
- rc = __cxl_mem_sanitize(mds, cmd);
- else
- rc = -EBUSY;
- up_read(&cxl_region_rwsem);
+ return __cxl_mem_sanitize(mds, cmd);
- return rc;
+ return -EBUSY;
}
-static int add_dpa_res(struct device *dev, struct resource *parent,
- struct resource *res, resource_size_t start,
- resource_size_t size, const char *type)
+static void add_part(struct cxl_dpa_info *info, u64 start, u64 size, enum cxl_partition_mode mode)
{
- int rc;
-
- res->name = type;
- res->start = start;
- res->end = start + size - 1;
- res->flags = IORESOURCE_MEM;
- if (resource_size(res) == 0) {
- dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
- return 0;
- }
- rc = request_resource(parent, res);
- if (rc) {
- dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
- res, rc);
- return rc;
- }
+ int i = info->nr_partitions;
- dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+ if (size == 0)
+ return;
- return 0;
+ info->part[i].range = (struct range) {
+ .start = start,
+ .end = start + size - 1,
+ };
+ info->part[i].mode = mode;
+ info->nr_partitions++;
}
-int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
+int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
struct device *dev = cxlds->dev;
int rc;
if (!cxlds->media_ready) {
- cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
- cxlds->ram_res = DEFINE_RES_MEM(0, 0);
- cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
+ info->size = 0;
return 0;
}
- cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);
+ info->size = mds->total_bytes;
if (mds->partition_align_bytes == 0) {
- rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
- mds->volatile_only_bytes, "ram");
- if (rc)
- return rc;
- return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
- mds->volatile_only_bytes,
- mds->persistent_only_bytes, "pmem");
+ add_part(info, 0, mds->volatile_only_bytes, CXL_PARTMODE_RAM);
+ add_part(info, mds->volatile_only_bytes,
+ mds->persistent_only_bytes, CXL_PARTMODE_PMEM);
+ return 0;
}
rc = cxl_mem_get_partition_info(mds);
@@ -1327,15 +1310,52 @@ int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
return rc;
}
- rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
- mds->active_volatile_bytes, "ram");
- if (rc)
- return rc;
- return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
- mds->active_volatile_bytes,
- mds->active_persistent_bytes, "pmem");
+ add_part(info, 0, mds->active_volatile_bytes, CXL_PARTMODE_RAM);
+ add_part(info, mds->active_volatile_bytes, mds->active_persistent_bytes,
+ CXL_PARTMODE_PMEM);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");
+
+int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count)
+{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+ struct cxl_mbox_get_health_info_out hi;
+ struct cxl_mbox_cmd mbox_cmd;
+ int rc;
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
+ .size_out = sizeof(hi),
+ .payload_out = &hi,
+ };
+
+ rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+ if (!rc)
+ *count = le32_to_cpu(hi.dirty_shutdown_cnt);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_get_dirty_count, "CXL");
+
+int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
+{
+ struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+ struct cxl_mbox_cmd mbox_cmd;
+ struct cxl_mbox_set_shutdown_state_in in = {
+ .state = 1
+ };
+
+ mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_SET_SHUTDOWN_STATE,
+ .size_in = sizeof(in),
+ .payload_in = &in,
+ };
+
+ return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
-EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, "CXL");
+EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL");
int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
@@ -1467,6 +1487,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL");
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
struct cxl_memdev_state *mds;
+ int rc;
mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
if (!mds) {
@@ -1480,8 +1501,12 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mds->cxlds.cxl_mbox.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
- mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
- mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
+
+ rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
+ if (rc == -EOPNOTSUPP)
+ dev_warn(dev, "CXL MCE unsupported\n");
+ else if (rc)
+ return ERR_PTR(rc);
return mds;
}
diff --git a/drivers/cxl/core/mce.c b/drivers/cxl/core/mce.c
new file mode 100644
index 000000000000..ff8d078c6ca1
--- /dev/null
+++ b/drivers/cxl/core/mce.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/set_memory.h>
+#include <asm/mce.h>
+#include <cxlmem.h>
+#include "mce.h"
+
+static int cxl_handle_mce(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cxl_memdev_state *mds = container_of(nb, struct cxl_memdev_state,
+ mce_notifier);
+ struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
+ struct cxl_port *endpoint = cxlmd->endpoint;
+ struct mce *mce = data;
+ u64 spa, spa_alias;
+ unsigned long pfn;
+
+ if (!mce || !mce_usable_address(mce))
+ return NOTIFY_DONE;
+
+ if (!endpoint)
+ return NOTIFY_DONE;
+
+ spa = mce->addr & MCI_ADDR_PHYSADDR;
+
+ pfn = spa >> PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return NOTIFY_DONE;
+
+ spa_alias = cxl_port_get_spa_cache_alias(endpoint, spa);
+ if (spa_alias == ~0ULL)
+ return NOTIFY_DONE;
+
+ pfn = spa_alias >> PAGE_SHIFT;
+
+ /*
+ * Take down the aliased memory page. The original memory page flagged
+ * by the MCE will be taken care of by the standard MCE handler.
+ */
+ dev_emerg(mds->cxlds.dev, "Offlining aliased SPA address: %#llx\n",
+ spa_alias);
+ if (!memory_failure(pfn, 0))
+ set_mce_nospec(pfn);
+
+ return NOTIFY_OK;
+}
+
+static void cxl_unregister_mce_notifier(void *mce_notifier)
+{
+ mce_unregister_decode_chain(mce_notifier);
+}
+
+int devm_cxl_register_mce_notifier(struct device *dev,
+ struct notifier_block *mce_notifier)
+{
+ mce_notifier->notifier_call = cxl_handle_mce;
+ mce_notifier->priority = MCE_PRIO_UC;
+ mce_register_decode_chain(mce_notifier);
+
+ return devm_add_action_or_reset(dev, cxl_unregister_mce_notifier,
+ mce_notifier);
+}
diff --git a/drivers/cxl/core/mce.h b/drivers/cxl/core/mce.h
new file mode 100644
index 000000000000..ace73424eeb6
--- /dev/null
+++ b/drivers/cxl/core/mce.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation. All rights reserved. */
+#ifndef _CXL_CORE_MCE_H_
+#define _CXL_CORE_MCE_H_
+
+#include <linux/notifier.h>
+
+#ifdef CONFIG_CXL_MCE
+int devm_cxl_register_mce_notifier(struct device *dev,
+ struct notifier_block *mce_notifier);
+#else
+static inline int
+devm_cxl_register_mce_notifier(struct device *dev,
+ struct notifier_block *mce_notifier)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 2e2e035abdaa..a16a5886d40a 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -75,12 +75,20 @@ static ssize_t label_storage_size_show(struct device *dev,
}
static DEVICE_ATTR_RO(label_storage_size);
+static resource_size_t cxl_ram_size(struct cxl_dev_state *cxlds)
+{
+ /* Static RAM is only expected at partition 0. */
+ if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
+ return 0;
+ return resource_size(&cxlds->part[0].res);
+}
+
static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = resource_size(&cxlds->ram_res);
+ unsigned long long len = cxl_ram_size(cxlds);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -93,7 +101,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- unsigned long long len = resource_size(&cxlds->pmem_res);
+ unsigned long long len = cxl_pmem_size(cxlds);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -198,22 +206,17 @@ static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
int rc = 0;
/* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
- if (resource_size(&cxlds->pmem_res)) {
- offset = cxlds->pmem_res.start;
- length = resource_size(&cxlds->pmem_res);
- rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
- if (rc)
- return rc;
- }
- if (resource_size(&cxlds->ram_res)) {
- offset = cxlds->ram_res.start;
- length = resource_size(&cxlds->ram_res);
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ const struct resource *res = &cxlds->part[i].res;
+
+ offset = res->start;
+ length = resource_size(res);
rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
/*
* Invalid Physical Address is not an error for
* volatile addresses. Device support is optional.
*/
- if (rc == -EFAULT)
+ if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
rc = 0;
}
return rc;
@@ -404,14 +407,21 @@ static struct attribute *cxl_memdev_attributes[] = {
NULL,
};
+static struct cxl_dpa_perf *to_pmem_perf(struct cxl_dev_state *cxlds)
+{
+ for (int i = 0; i < cxlds->nr_partitions; i++)
+ if (cxlds->part[i].mode == CXL_PARTMODE_PMEM)
+ return &cxlds->part[i].perf;
+ return NULL;
+}
+
static ssize_t pmem_qos_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+ return sysfs_emit(buf, "%d\n", to_pmem_perf(cxlds)->qos_class);
}
static struct device_attribute dev_attr_pmem_qos_class =
@@ -423,14 +433,20 @@ static struct attribute *cxl_memdev_pmem_attributes[] = {
NULL,
};
+static struct cxl_dpa_perf *to_ram_perf(struct cxl_dev_state *cxlds)
+{
+ if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
+ return NULL;
+ return &cxlds->part[0].perf;
+}
+
static ssize_t ram_qos_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+ return sysfs_emit(buf, "%d\n", to_ram_perf(cxlds)->qos_class);
}
static struct device_attribute dev_attr_ram_qos_class =
@@ -466,11 +482,11 @@ static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_dpa_perf *perf = to_ram_perf(cxlmd->cxlds);
- if (a == &dev_attr_ram_qos_class.attr)
- if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
- return 0;
+ if (a == &dev_attr_ram_qos_class.attr &&
+ (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
+ return 0;
return a->mode;
}
@@ -485,11 +501,11 @@ static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n
{
struct device *dev = kobj_to_dev(kobj);
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ struct cxl_dpa_perf *perf = to_pmem_perf(cxlmd->cxlds);
- if (a == &dev_attr_pmem_qos_class.attr)
- if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
- return 0;
+ if (a == &dev_attr_pmem_qos_class.attr &&
+ (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
+ return 0;
return a->mode;
}
@@ -566,10 +582,9 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
{
struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
- down_write(&cxl_memdev_rwsem);
+ guard(rwsem_write)(&cxl_memdev_rwsem);
bitmap_or(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
cmds, CXL_MEM_COMMAND_ID_MAX);
- up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");
@@ -583,10 +598,9 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
{
struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
- down_write(&cxl_memdev_rwsem);
+ guard(rwsem_write)(&cxl_memdev_rwsem);
bitmap_andnot(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
cmds, CXL_MEM_COMMAND_ID_MAX);
- up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL");
@@ -594,9 +608,8 @@ static void cxl_memdev_shutdown(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- down_write(&cxl_memdev_rwsem);
+ guard(rwsem_write)(&cxl_memdev_rwsem);
cxlmd->cxlds = NULL;
- up_write(&cxl_memdev_rwsem);
}
static void cxl_memdev_unregister(void *_cxlmd)
@@ -678,15 +691,13 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
{
struct cxl_memdev *cxlmd = file->private_data;
struct cxl_dev_state *cxlds;
- int rc = -ENXIO;
- down_read(&cxl_memdev_rwsem);
+ guard(rwsem_read)(&cxl_memdev_rwsem);
cxlds = cxlmd->cxlds;
if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
- rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
- up_read(&cxl_memdev_rwsem);
+ return __cxl_memdev_ioctl(cxlmd, cmd, arg);
- return rc;
+ return -ENXIO;
}
static int cxl_memdev_open(struct inode *inode, struct file *file)
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 013b869b66cb..96fecb799cbc 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -1054,3 +1054,100 @@ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
return 0;
}
+
+/*
+ * Set max timeout such that platforms will optimize GPF flow to avoid
+ * the implied worst-case scenario delays. On a sane platform, all
+ * devices should always complete GPF within the energy budget of
+ * the GPF flow. The kernel does not have enough information to pick
+ * anything better than "maximize timeouts and hope it works".
+ *
+ * A misbehaving device could block forward progress of GPF for all
+ * the other devices, exhausting the energy budget of the platform.
+ * However, the spec seems to assume that moving on from slow-to-respond
+ * devices is a virtue. It is not possible to know whether, in actuality,
+ * the slow-to-respond device is *the* most critical device in the
+ * system to wait for.
+ */
+#define GPF_TIMEOUT_BASE_MAX 2
+#define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */
+
+u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port)
+{
+ u16 dvsec;
+
+ if (!dev_is_pci(dev))
+ return 0;
+
+ dvsec = pci_find_dvsec_capability(to_pci_dev(dev), PCI_VENDOR_ID_CXL,
+ is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF);
+ if (!dvsec)
+ dev_warn(dev, "%s GPF DVSEC not present\n",
+ is_port ? "Port" : "Device");
+ return dvsec;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_gpf_get_dvsec, "CXL");
+
+static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
+{
+ u64 base, scale;
+ int rc, offset;
+ u16 ctrl;
+
+ switch (phase) {
+ case 1:
+ offset = CXL_DVSEC_PORT_GPF_PHASE_1_CONTROL_OFFSET;
+ base = CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK;
+ scale = CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK;
+ break;
+ case 2:
+ offset = CXL_DVSEC_PORT_GPF_PHASE_2_CONTROL_OFFSET;
+ base = CXL_DVSEC_PORT_GPF_PHASE_2_TMO_BASE_MASK;
+ scale = CXL_DVSEC_PORT_GPF_PHASE_2_TMO_SCALE_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = pci_read_config_word(pdev, dvsec + offset, &ctrl);
+ if (rc)
+ return rc;
+
+ if (FIELD_GET(base, ctrl) == GPF_TIMEOUT_BASE_MAX &&
+ FIELD_GET(scale, ctrl) == GPF_TIMEOUT_SCALE_MAX)
+ return 0;
+
+ ctrl = FIELD_PREP(base, GPF_TIMEOUT_BASE_MAX);
+ ctrl |= FIELD_PREP(scale, GPF_TIMEOUT_SCALE_MAX);
+
+ rc = pci_write_config_word(pdev, dvsec + offset, ctrl);
+ if (!rc)
+ pci_dbg(pdev, "Port GPF phase %d timeout: %d0 secs\n",
+ phase, GPF_TIMEOUT_BASE_MAX);
+
+ return rc;
+}
+
+int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port)
+{
+ struct pci_dev *pdev;
+
+ if (!port)
+ return -EINVAL;
+
+ if (!port->gpf_dvsec) {
+ int dvsec;
+
+ dvsec = cxl_gpf_get_dvsec(dport_dev, true);
+ if (!dvsec)
+ return -EINVAL;
+
+ port->gpf_dvsec = dvsec;
+ }
+
+ pdev = to_pci_dev(dport_dev);
+ update_gpf_port_dvsec(pdev, port->gpf_dvsec, 1);
+ update_gpf_port_dvsec(pdev, port->gpf_dvsec, 2);
+
+ return 0;
+}
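Editorial note: update_gpf_port_dvsec() programs the largest encodable Phase 1/2 timeout, base = 2 with scale = 7. Assuming the usual CXL GPF encoding of timeout = base x 10^scale microseconds (an assumption here, though it matches the "10 seconds" scale comment and the "%d0 secs" debug message), that works out to 20 seconds per phase; a quick check:

/* Quick check of the assumed GPF timeout encoding: base * 10^scale
 * microseconds. With base 2 and scale 7 this is 2 * 10^7 us = 20 s. */
#include <stdio.h>
#include <stdint.h>

static uint64_t gpf_timeout_us(unsigned int base, unsigned int scale)
{
	uint64_t unit = 1;

	while (scale--)
		unit *= 10;
	return (uint64_t)base * unit;
}

int main(void)
{
	uint64_t us = gpf_timeout_us(2, 7);

	printf("GPF timeout: %llu us (%llu s)\n",
	       (unsigned long long)us, (unsigned long long)(us / 1000000));
	return 0;
}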
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 78a5c2c25982..0fd6646c1a2e 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -194,25 +194,35 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ /* without @cxl_dpa_rwsem, make sure @part is not reloaded */
+ int part = READ_ONCE(cxled->part);
+ const char *desc;
+
+ if (part < 0)
+ desc = "none";
+ else
+ desc = cxlds->part[part].res.name;
- return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode));
+ return sysfs_emit(buf, "%s\n", desc);
}
static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
- enum cxl_decoder_mode mode;
+ enum cxl_partition_mode mode;
ssize_t rc;
if (sysfs_streq(buf, "pmem"))
- mode = CXL_DECODER_PMEM;
+ mode = CXL_PARTMODE_PMEM;
else if (sysfs_streq(buf, "ram"))
- mode = CXL_DECODER_RAM;
+ mode = CXL_PARTMODE_RAM;
else
return -EINVAL;
- rc = cxl_dpa_set_mode(cxled, mode);
+ rc = cxl_dpa_set_part(cxled, mode);
if (rc)
return rc;
@@ -549,13 +559,9 @@ static ssize_t decoders_committed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_port *port = to_cxl_port(dev);
- int rc;
-
- down_read(&cxl_region_rwsem);
- rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
- up_read(&cxl_region_rwsem);
- return rc;
+ guard(rwsem_read)(&cxl_region_rwsem);
+ return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
}
static DEVICE_ATTR_RO(decoders_committed);
@@ -1672,6 +1678,8 @@ retry:
if (rc && rc != -EBUSY)
return rc;
+ cxl_gpf_port_setup(dport_dev, port);
+
/* Any more ports to add between this one and the root? */
if (!dev_is_cxl_root_child(&port->dev))
continue;
@@ -1899,6 +1907,7 @@ struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
return ERR_PTR(-ENOMEM);
cxled->pos = -1;
+ cxled->part = -1;
cxld = &cxled->cxld;
rc = cxl_decoder_init(port, cxld);
if (rc) {
@@ -2339,8 +2348,14 @@ static __init int cxl_core_init(void)
if (rc)
goto err_region;
+ rc = cxl_ras_init();
+ if (rc)
+ goto err_ras;
+
return 0;
+err_ras:
+ cxl_region_exit();
err_region:
bus_unregister(&cxl_bus_type);
err_bus:
@@ -2352,6 +2367,7 @@ err_wq:
static void cxl_core_exit(void)
{
+ cxl_ras_exit();
cxl_region_exit();
bus_unregister(&cxl_bus_type);
destroy_workqueue(cxl_bus_wq);
diff --git a/drivers/cxl/core/ras.c b/drivers/cxl/core/ras.c
new file mode 100644
index 000000000000..485a831695c7
--- /dev/null
+++ b/drivers/cxl/core/ras.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2025 AMD Corporation. All rights reserved. */
+
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <cxl/event.h>
+#include <cxlmem.h>
+#include "trace.h"
+
+static void cxl_cper_trace_corr_port_prot_err(struct pci_dev *pdev,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.cor_status & ~ras_cap.cor_mask;
+
+ trace_cxl_port_aer_correctable_error(&pdev->dev, status);
+}
+
+static void cxl_cper_trace_uncorr_port_prot_err(struct pci_dev *pdev,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask;
+ u32 fe;
+
+ if (hweight32(status) > 1)
+ fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
+ ras_cap.cap_control));
+ else
+ fe = status;
+
+ trace_cxl_port_aer_uncorrectable_error(&pdev->dev, status, fe,
+ ras_cap.header_log);
+}
+
+static void cxl_cper_trace_corr_prot_err(struct pci_dev *pdev,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.cor_status & ~ras_cap.cor_mask;
+ struct cxl_dev_state *cxlds;
+
+ cxlds = pci_get_drvdata(pdev);
+ if (!cxlds)
+ return;
+
+ trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
+}
+
+static void cxl_cper_trace_uncorr_prot_err(struct pci_dev *pdev,
+ struct cxl_ras_capability_regs ras_cap)
+{
+ u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask;
+ struct cxl_dev_state *cxlds;
+ u32 fe;
+
+ cxlds = pci_get_drvdata(pdev);
+ if (!cxlds)
+ return;
+
+ if (hweight32(status) > 1)
+ fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
+ ras_cap.cap_control));
+ else
+ fe = status;
+
+ trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe,
+ ras_cap.header_log);
+}
+
+static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data)
+{
+ unsigned int devfn = PCI_DEVFN(data->prot_err.agent_addr.device,
+ data->prot_err.agent_addr.function);
+ struct pci_dev *pdev __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(data->prot_err.agent_addr.segment,
+ data->prot_err.agent_addr.bus,
+ devfn);
+ int port_type;
+
+ if (!pdev)
+ return;
+
+ guard(device)(&pdev->dev);
+
+ port_type = pci_pcie_type(pdev);
+ if (port_type == PCI_EXP_TYPE_ROOT_PORT ||
+ port_type == PCI_EXP_TYPE_DOWNSTREAM ||
+ port_type == PCI_EXP_TYPE_UPSTREAM) {
+ if (data->severity == AER_CORRECTABLE)
+ cxl_cper_trace_corr_port_prot_err(pdev, data->ras_cap);
+ else
+ cxl_cper_trace_uncorr_port_prot_err(pdev, data->ras_cap);
+
+ return;
+ }
+
+ if (data->severity == AER_CORRECTABLE)
+ cxl_cper_trace_corr_prot_err(pdev, data->ras_cap);
+ else
+ cxl_cper_trace_uncorr_prot_err(pdev, data->ras_cap);
+}
+
+static void cxl_cper_prot_err_work_fn(struct work_struct *work)
+{
+ struct cxl_cper_prot_err_work_data wd;
+
+ while (cxl_cper_prot_err_kfifo_get(&wd))
+ cxl_cper_handle_prot_err(&wd);
+}
+static DECLARE_WORK(cxl_cper_prot_err_work, cxl_cper_prot_err_work_fn);
+
+int cxl_ras_init(void)
+{
+ return cxl_cper_register_prot_err_work(&cxl_cper_prot_err_work);
+}
+
+void cxl_ras_exit(void)
+{
+ cxl_cper_unregister_prot_err_work(&cxl_cper_prot_err_work);
+ cancel_work_sync(&cxl_cper_prot_err_work);
+}
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e8d11a988fd9..c3f4dc244df7 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -144,7 +144,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
rc = down_read_interruptible(&cxl_region_rwsem);
if (rc)
return rc;
- if (cxlr->mode != CXL_DECODER_PMEM)
+ if (cxlr->mode != CXL_PARTMODE_PMEM)
rc = sysfs_emit(buf, "\n");
else
rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
@@ -441,7 +441,7 @@ static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
* Support tooling that expects to find a 'uuid' attribute for all
* regions regardless of mode.
*/
- if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
+ if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_PARTMODE_PMEM)
return 0444;
return a->mode;
}
@@ -603,8 +603,16 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_region *cxlr = to_cxl_region(dev);
+ const char *desc;
- return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
+ if (cxlr->mode == CXL_PARTMODE_RAM)
+ desc = "ram";
+ else if (cxlr->mode == CXL_PARTMODE_PMEM)
+ desc = "pmem";
+ else
+ desc = "";
+
+ return sysfs_emit(buf, "%s\n", desc);
}
static DEVICE_ATTR_RO(mode);
@@ -630,7 +638,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
/* ways, granularity and uuid (if PMEM) need to be set before HPA */
if (!p->interleave_ways || !p->interleave_granularity ||
- (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
+ (cxlr->mode == CXL_PARTMODE_PMEM && uuid_is_null(&p->uuid)))
return -ENXIO;
div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
@@ -824,6 +832,21 @@ static int match_free_decoder(struct device *dev, const void *data)
return 1;
}
+static bool region_res_match_cxl_range(const struct cxl_region_params *p,
+ struct range *range)
+{
+ if (!p->res)
+ return false;
+
+ /*
+ * For an extended linear cache region, the CXL range is assumed to
+ * be fronted by the DRAM range in the current known implementation.
+ * This assumption holds until a variant implementation exists.
+ */
+ return p->res->start + p->cache_size == range->start &&
+ p->res->end == range->end;
+}
+
static int match_auto_decoder(struct device *dev, const void *data)
{
const struct cxl_region_params *p = data;
@@ -836,7 +859,7 @@ static int match_auto_decoder(struct device *dev, const void *data)
cxld = to_cxl_decoder(dev);
r = &cxld->hpa_range;
- if (p->res && p->res->start == r->start && p->res->end == r->end)
+ if (region_res_match_cxl_range(p, r))
return 1;
return 0;
@@ -1424,8 +1447,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxld->interleave_ways != iw ||
cxld->interleave_granularity != ig ||
- cxld->hpa_range.start != p->res->start ||
- cxld->hpa_range.end != p->res->end ||
+ !region_res_match_cxl_range(p, &cxld->hpa_range) ||
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
"%s:%s %s expected iw: %d ig: %d %pr\n",
@@ -1888,6 +1910,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_region_params *p = &cxlr->params;
struct cxl_port *ep_port, *root_port;
struct cxl_dport *dport;
@@ -1902,17 +1925,17 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return rc;
}
- if (cxled->mode != cxlr->mode) {
- dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
- dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
- return -EINVAL;
- }
-
- if (cxled->mode == CXL_DECODER_DEAD) {
+ if (cxled->part < 0) {
dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
return -ENODEV;
}
+ if (cxlds->part[cxled->part].mode != cxlr->mode) {
+ dev_dbg(&cxlr->dev, "%s region mode: %d mismatch\n",
+ dev_name(&cxled->cxld.dev), cxlr->mode);
+ return -EINVAL;
+ }
+
/* all full of members, or interleave config not established? */
if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_dbg(&cxlr->dev, "region already active\n");
@@ -1951,13 +1974,13 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return -ENXIO;
}
- if (resource_size(cxled->dpa_res) * p->interleave_ways !=
+ if (resource_size(cxled->dpa_res) * p->interleave_ways + p->cache_size !=
resource_size(p->res)) {
dev_dbg(&cxlr->dev,
- "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
+ "%s:%s-size-%#llx * ways-%d + cache-%#llx != region-size-%#llx\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
(u64)resource_size(cxled->dpa_res), p->interleave_ways,
- (u64)resource_size(p->res));
+ (u64)p->cache_size, (u64)resource_size(p->res));
return -EINVAL;
}
@@ -2115,7 +2138,7 @@ out:
void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
down_write(&cxl_region_rwsem);
- cxled->mode = CXL_DECODER_DEAD;
+ cxled->part = -1;
cxl_region_detach(cxled);
up_write(&cxl_region_rwsem);
}
@@ -2471,7 +2494,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb,
*/
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
int id,
- enum cxl_decoder_mode mode,
+ enum cxl_partition_mode mode,
enum cxl_decoder_type type)
{
struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
@@ -2525,13 +2548,13 @@ static ssize_t create_ram_region_show(struct device *dev,
}
static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
- enum cxl_decoder_mode mode, int id)
+ enum cxl_partition_mode mode, int id)
{
int rc;
switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
+ case CXL_PARTMODE_RAM:
+ case CXL_PARTMODE_PMEM:
break;
default:
dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
@@ -2551,7 +2574,7 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
}
static ssize_t create_region_store(struct device *dev, const char *buf,
- size_t len, enum cxl_decoder_mode mode)
+ size_t len, enum cxl_partition_mode mode)
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
struct cxl_region *cxlr;
@@ -2572,7 +2595,7 @@ static ssize_t create_pmem_region_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- return create_region_store(dev, buf, len, CXL_DECODER_PMEM);
+ return create_region_store(dev, buf, len, CXL_PARTMODE_PMEM);
}
DEVICE_ATTR_RW(create_pmem_region);
@@ -2580,7 +2603,7 @@ static ssize_t create_ram_region_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- return create_region_store(dev, buf, len, CXL_DECODER_RAM);
+ return create_region_store(dev, buf, len, CXL_PARTMODE_RAM);
}
DEVICE_ATTR_RW(create_ram_region);
@@ -2678,7 +2701,7 @@ EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, "CXL");
struct cxl_poison_context {
struct cxl_port *port;
- enum cxl_decoder_mode mode;
+ int part;
u64 offset;
};
@@ -2686,47 +2709,45 @@ static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
struct cxl_poison_context *ctx)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ const struct resource *res;
+ struct resource *p, *last;
u64 offset, length;
int rc = 0;
+ if (ctx->part < 0)
+ return 0;
+
/*
- * Collect poison for the remaining unmapped resources
- * after poison is collected by committed endpoints.
- *
- * Knowing that PMEM must always follow RAM, get poison
- * for unmapped resources based on the last decoder's mode:
- * ram: scan remains of ram range, then any pmem range
- * pmem: scan remains of pmem range
+ * Collect poison for the remaining unmapped resources after
+ * poison is collected by committed endpoint decoders.
*/
-
- if (ctx->mode == CXL_DECODER_RAM) {
- offset = ctx->offset;
- length = resource_size(&cxlds->ram_res) - offset;
+ for (int i = ctx->part; i < cxlds->nr_partitions; i++) {
+ res = &cxlds->part[i].res;
+ for (p = res->child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last)
+ offset = last->end + 1;
+ else
+ offset = res->start;
+ length = res->end - offset + 1;
+ if (!length)
+ break;
rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
- if (rc == -EFAULT)
- rc = 0;
+ if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
+ continue;
if (rc)
- return rc;
- }
- if (ctx->mode == CXL_DECODER_PMEM) {
- offset = ctx->offset;
- length = resource_size(&cxlds->dpa_res) - offset;
- if (!length)
- return 0;
- } else if (resource_size(&cxlds->pmem_res)) {
- offset = cxlds->pmem_res.start;
- length = resource_size(&cxlds->pmem_res);
- } else {
- return 0;
+ break;
}
- return cxl_mem_get_poison(cxlmd, offset, length, NULL);
+ return rc;
}
static int poison_by_decoder(struct device *dev, void *arg)
{
struct cxl_poison_context *ctx = arg;
struct cxl_endpoint_decoder *cxled;
+ enum cxl_partition_mode mode;
+ struct cxl_dev_state *cxlds;
struct cxl_memdev *cxlmd;
u64 offset, length;
int rc = 0;
@@ -2735,27 +2756,18 @@ static int poison_by_decoder(struct device *dev, void *arg)
return rc;
cxled = to_cxl_endpoint_decoder(dev);
- if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
- return rc;
-
- /*
- * Regions are only created with single mode decoders: pmem or ram.
- * Linux does not support mixed mode decoders. This means that
- * reading poison per endpoint decoder adheres to the requirement
- * that poison reads of pmem and ram must be separated.
- * CXL 3.0 Spec 8.2.9.8.4.1
- */
- if (cxled->mode == CXL_DECODER_MIXED) {
- dev_dbg(dev, "poison list read unsupported in mixed mode\n");
+ if (!cxled->dpa_res)
return rc;
- }
cxlmd = cxled_to_memdev(cxled);
+ cxlds = cxlmd->cxlds;
+ mode = cxlds->part[cxled->part].mode;
+
if (cxled->skip) {
offset = cxled->dpa_res->start - cxled->skip;
length = cxled->skip;
rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
- if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
+ if (rc == -EFAULT && mode == CXL_PARTMODE_RAM)
rc = 0;
if (rc)
return rc;
@@ -2764,7 +2776,7 @@ static int poison_by_decoder(struct device *dev, void *arg)
offset = cxled->dpa_res->start;
length = cxled->dpa_res->end - offset + 1;
rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
- if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
+ if (rc == -EFAULT && mode == CXL_PARTMODE_RAM)
rc = 0;
if (rc)
return rc;
@@ -2772,7 +2784,7 @@ static int poison_by_decoder(struct device *dev, void *arg)
/* Iterate until commit_end is reached */
if (cxled->cxld.id == ctx->port->commit_end) {
ctx->offset = cxled->dpa_res->end + 1;
- ctx->mode = cxled->mode;
+ ctx->part = cxled->part;
return 1;
}
@@ -2785,7 +2797,8 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port)
int rc = 0;
ctx = (struct cxl_poison_context) {
- .port = port
+ .port = port,
+ .part = -1,
};
rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
@@ -2921,7 +2934,7 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
/* Apply the hpa_offset to the region base address */
- hpa = hpa_offset + p->res->start;
+ hpa = hpa_offset + p->res->start + p->cache_size;
/* Root decoder translation overrides typical modulo decode */
if (cxlrd->hpa_to_spa)
@@ -3038,17 +3051,13 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
struct cxl_dax_region *cxlr_dax;
struct device *dev;
- down_read(&cxl_region_rwsem);
- if (p->state != CXL_CONFIG_COMMIT) {
- cxlr_dax = ERR_PTR(-ENXIO);
- goto out;
- }
+ guard(rwsem_read)(&cxl_region_rwsem);
+ if (p->state != CXL_CONFIG_COMMIT)
+ return ERR_PTR(-ENXIO);
cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
- if (!cxlr_dax) {
- cxlr_dax = ERR_PTR(-ENOMEM);
- goto out;
- }
+ if (!cxlr_dax)
+ return ERR_PTR(-ENOMEM);
cxlr_dax->hpa_range.start = p->res->start;
cxlr_dax->hpa_range.end = p->res->end;
@@ -3061,8 +3070,6 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
dev->parent = &cxlr->dev;
dev->bus = &cxl_bus_type;
dev->type = &cxl_dax_region_type;
-out:
- up_read(&cxl_region_rwsem);
return cxlr_dax;
}
@@ -3208,7 +3215,6 @@ static int match_region_by_range(struct device *dev, const void *data)
struct cxl_region_params *p;
struct cxl_region *cxlr;
const struct range *r = data;
- int rc = 0;
if (!is_cxl_region(dev))
return 0;
@@ -3216,60 +3222,96 @@ static int match_region_by_range(struct device *dev, const void *data)
cxlr = to_cxl_region(dev);
p = &cxlr->params;
- down_read(&cxl_region_rwsem);
+ guard(rwsem_read)(&cxl_region_rwsem);
if (p->res && p->res->start == r->start && p->res->end == r->end)
- rc = 1;
- up_read(&cxl_region_rwsem);
+ return 1;
- return rc;
+ return 0;
}
-/* Establish an empty region covering the given HPA range */
-static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
- struct cxl_endpoint_decoder *cxled)
+static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
+ struct resource *res)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ int nid = phys_to_target_node(res->start);
+ resource_size_t size = resource_size(res);
+ resource_size_t cache_size, start;
+ int rc;
+
+ rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size);
+ if (rc)
+ return rc;
+
+ if (!cache_size)
+ return 0;
+
+ if (size != cache_size) {
+ dev_warn(&cxlr->dev,
+ "Extended Linear Cache size %pa != CXL size %pa. No Support!",
+ &cache_size, &size);
+ return -ENXIO;
+ }
+
+ /*
+ * Move the start of the range to where the cache range starts. The
+ * implementation assumes that the cache range is in front of the
+ * CXL range. This is not dictated by the HMAT spec but is how the
+ * current known implementation is configured.
+ *
+ * The cache range is expected to be within the CFMWS. The adjusted
+ * res->start should not be less than cxlrd->res->start.
+ */
+ start = res->start - cache_size;
+ if (start < cxlrd->res->start)
+ return -ENXIO;
+
+ res->start = start;
+ p->cache_size = cache_size;
+
+ return 0;
+}
+
+static int __construct_region(struct cxl_region *cxlr,
+ struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- struct cxl_port *port = cxlrd_to_port(cxlrd);
struct range *hpa = &cxled->cxld.hpa_range;
struct cxl_region_params *p;
- struct cxl_region *cxlr;
struct resource *res;
int rc;
- do {
- cxlr = __create_region(cxlrd, cxled->mode,
- atomic_read(&cxlrd->region_id));
- } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
-
- if (IS_ERR(cxlr)) {
- dev_err(cxlmd->dev.parent,
- "%s:%s: %s failed assign region: %ld\n",
- dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
- __func__, PTR_ERR(cxlr));
- return cxlr;
- }
-
- down_write(&cxl_region_rwsem);
+ guard(rwsem_write)(&cxl_region_rwsem);
p = &cxlr->params;
if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
dev_err(cxlmd->dev.parent,
"%s:%s: %s autodiscovery interrupted\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
__func__);
- rc = -EBUSY;
- goto err;
+ return -EBUSY;
}
set_bit(CXL_REGION_F_AUTO, &cxlr->flags);
res = kmalloc(sizeof(*res), GFP_KERNEL);
- if (!res) {
- rc = -ENOMEM;
- goto err;
- }
+ if (!res)
+ return -ENOMEM;
*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
dev_name(&cxlr->dev));
+
+ rc = cxl_extended_linear_cache_resize(cxlr, res);
+ if (rc && rc != -EOPNOTSUPP) {
+ /*
+ * Failing to support extended linear cache region resize does not
+ * prevent the region from functioning; it only causes 'cxl list' to
+ * show an incorrect region size.
+ */
+ dev_warn(cxlmd->dev.parent,
+ "Extended linear cache calculation failed rc:%d\n", rc);
+ }
+
rc = insert_resource(cxlrd->res, res);
if (rc) {
/*
@@ -3289,7 +3331,7 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
if (rc)
- goto err;
+ return rc;
dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
@@ -3298,14 +3340,40 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
/* ...to match put_device() in cxl_add_to_region() */
get_device(&cxlr->dev);
- up_write(&cxl_region_rwsem);
- return cxlr;
+ return 0;
+}
-err:
- up_write(&cxl_region_rwsem);
- devm_release_action(port->uport_dev, unregister_region, cxlr);
- return ERR_PTR(rc);
+/* Establish an empty region covering the given HPA range */
+static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
+ struct cxl_endpoint_decoder *cxled)
+{
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_port *port = cxlrd_to_port(cxlrd);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ int rc, part = READ_ONCE(cxled->part);
+ struct cxl_region *cxlr;
+
+ do {
+ cxlr = __create_region(cxlrd, cxlds->part[part].mode,
+ atomic_read(&cxlrd->region_id));
+ } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
+
+ if (IS_ERR(cxlr)) {
+ dev_err(cxlmd->dev.parent,
+ "%s:%s: %s failed assign region: %ld\n",
+ dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+ __func__, PTR_ERR(cxlr));
+ return cxlr;
+ }
+
+ rc = __construct_region(cxlr, cxlrd, cxled);
+ if (rc) {
+ devm_release_action(port->uport_dev, unregister_region, cxlr);
+ return ERR_PTR(rc);
+ }
+
+ return cxlr;
}
int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
@@ -3375,6 +3443,34 @@ out:
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL");
+u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa)
+{
+ struct cxl_region_ref *iter;
+ unsigned long index;
+
+ if (!endpoint)
+ return ~0ULL;
+
+ guard(rwsem_write)(&cxl_region_rwsem);
+
+ xa_for_each(&endpoint->regions, index, iter) {
+ struct cxl_region_params *p = &iter->region->params;
+
+ if (p->res->start <= spa && spa <= p->res->end) {
+ if (!p->cache_size)
+ return ~0ULL;
+
+ if (spa >= p->res->start + p->cache_size)
+ return spa - p->cache_size;
+
+ return spa + p->cache_size;
+ }
+ }
+
+ return ~0ULL;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_port_get_spa_cache_alias, "CXL");
+
static int is_system_ram(struct resource *res, void *arg)
{
struct cxl_region *cxlr = arg;
@@ -3440,9 +3536,9 @@ out:
return rc;
switch (cxlr->mode) {
- case CXL_DECODER_PMEM:
+ case CXL_PARTMODE_PMEM:
return devm_cxl_add_pmem_region(cxlr);
- case CXL_DECODER_RAM:
+ case CXL_PARTMODE_RAM:
/*
* The region cannot be managed by CXL if any portion of
* it is already online as 'System RAM'
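
cxl_port_get_spa_cache_alias() above reports the aliased address on the other side of an extended linear cache, or ~0ULL when the SPA is not covered by such a region. A hedged caller sketch; report_alias() and its pr_debug() output are illustrative only, with the endpoint port and SPA assumed to come from the surrounding error-handling path:

static void report_alias(struct cxl_port *endpoint, u64 spa)
{
        u64 alias = cxl_port_get_spa_cache_alias(endpoint, spa);

        if (alias == ~0ULL)
                return; /* no extended linear cache covers this address */

        pr_debug("SPA %#llx aliases to %#llx\n", spa, alias);
}
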
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index cea706b683b5..25ebfbc1616c 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -48,6 +48,34 @@
{ CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \
)
+TRACE_EVENT(cxl_port_aer_uncorrectable_error,
+ TP_PROTO(struct device *dev, u32 status, u32 fe, u32 *hl),
+ TP_ARGS(dev, status, fe, hl),
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __string(host, dev_name(dev->parent))
+ __field(u32, status)
+ __field(u32, first_error)
+ __array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
+ ),
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(host);
+ __entry->status = status;
+ __entry->first_error = fe;
+ /*
+ * Embed the 512B headerlog data for user app retrieval and
+ * parsing, but no need to print this in the trace buffer.
+ */
+ memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE);
+ ),
+ TP_printk("device=%s host=%s status: '%s' first_error: '%s'",
+ __get_str(device), __get_str(host),
+ show_uc_errs(__entry->status),
+ show_uc_errs(__entry->first_error)
+ )
+);
+
TRACE_EVENT(cxl_aer_uncorrectable_error,
TP_PROTO(const struct cxl_memdev *cxlmd, u32 status, u32 fe, u32 *hl),
TP_ARGS(cxlmd, status, fe, hl),
@@ -96,6 +124,25 @@ TRACE_EVENT(cxl_aer_uncorrectable_error,
{ CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \
)
+TRACE_EVENT(cxl_port_aer_correctable_error,
+ TP_PROTO(struct device *dev, u32 status),
+ TP_ARGS(dev, status),
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __string(host, dev_name(dev->parent))
+ __field(u32, status)
+ ),
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(host);
+ __entry->status = status;
+ ),
+ TP_printk("device=%s host=%s status='%s'",
+ __get_str(device), __get_str(host),
+ show_ce_errs(__entry->status)
+ )
+);
+
TRACE_EVENT(cxl_aer_correctable_error,
TP_PROTO(const struct cxl_memdev *cxlmd, u32 status),
TP_ARGS(cxlmd, status),
@@ -392,9 +439,10 @@ TRACE_EVENT(cxl_generic_event,
TRACE_EVENT(cxl_general_media,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec),
+ struct cxl_region *cxlr, u64 hpa, u64 hpa_alias0,
+ struct cxl_event_gen_media *rec),
- TP_ARGS(cxlmd, log, cxlr, hpa, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, hpa_alias0, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -408,6 +456,7 @@ TRACE_EVENT(cxl_general_media,
__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
/* Following are out of order to pack trace record */
__field(u64, hpa)
+ __field(u64, hpa_alias0)
__field_struct(uuid_t, region_uuid)
__field(u16, validity_flags)
__field(u8, rank)
@@ -438,6 +487,7 @@ TRACE_EVENT(cxl_general_media,
CXL_EVENT_GEN_MED_COMP_ID_SIZE);
__entry->validity_flags = get_unaligned_le16(&rec->media_hdr.validity_flags);
__entry->hpa = hpa;
+ __entry->hpa_alias0 = hpa_alias0;
if (cxlr) {
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
@@ -455,7 +505,7 @@ TRACE_EVENT(cxl_general_media,
"device=%x validity_flags='%s' " \
"comp_id=%s comp_id_pldm_valid_flags='%s' " \
"pldm_entity_id=%s pldm_resource_id=%s " \
- "hpa=%llx region=%s region_uuid=%pUb " \
+ "hpa=%llx hpa_alias0=%llx region=%s region_uuid=%pUb " \
"cme_threshold_ev_flags='%s' cme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
@@ -470,7 +520,7 @@ TRACE_EVENT(cxl_general_media,
CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
show_pldm_resource_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT,
CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid,
+ __entry->hpa, __entry->hpa_alias0, __get_str(region_name), &__entry->region_uuid,
show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags), __entry->cme_count
)
);
@@ -529,9 +579,10 @@ TRACE_EVENT(cxl_general_media,
TRACE_EVENT(cxl_dram,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec),
+ struct cxl_region *cxlr, u64 hpa, u64 hpa_alias0,
+ struct cxl_event_dram *rec),
- TP_ARGS(cxlmd, log, cxlr, hpa, rec),
+ TP_ARGS(cxlmd, log, cxlr, hpa, hpa_alias0, rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -547,6 +598,7 @@ TRACE_EVENT(cxl_dram,
__field(u32, row)
__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
__field(u64, hpa)
+ __field(u64, hpa_alias0)
__field_struct(uuid_t, region_uuid)
__field(u8, rank) /* Out of order to pack trace record */
__field(u8, bank_group) /* Out of order to pack trace record */
@@ -584,6 +636,7 @@ TRACE_EVENT(cxl_dram,
memcpy(__entry->cor_mask, &rec->correction_mask,
CXL_EVENT_DER_CORRECTION_MASK_SIZE);
__entry->hpa = hpa;
+ __entry->hpa_alias0 = hpa_alias0;
if (cxlr) {
__assign_str(region_name);
uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
@@ -604,7 +657,7 @@ TRACE_EVENT(cxl_dram,
"validity_flags='%s' " \
"comp_id=%s comp_id_pldm_valid_flags='%s' " \
"pldm_entity_id=%s pldm_resource_id=%s " \
- "hpa=%llx region=%s region_uuid=%pUb " \
+ "hpa=%llx hpa_alias0=%llx region=%s region_uuid=%pUb " \
"sub_channel=%u cme_threshold_ev_flags='%s' cvme_count=%u",
__entry->dpa, show_dpa_flags(__entry->dpa_flags),
show_event_desc_flags(__entry->descriptor),
@@ -622,7 +675,7 @@ TRACE_EVENT(cxl_dram,
CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
show_pldm_resource_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT,
CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id),
- __entry->hpa, __get_str(region_name), &__entry->region_uuid,
+ __entry->hpa, __entry->hpa_alias0, __get_str(region_name), &__entry->region_uuid,
__entry->sub_channel, show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags),
__entry->cvme_count
)
@@ -870,6 +923,7 @@ TRACE_EVENT(cxl_poison,
__string(region, cxlr ? dev_name(&cxlr->dev) : "")
__field(u64, overflow_ts)
__field(u64, hpa)
+ __field(u64, hpa_alias0)
__field(u64, dpa)
__field(u32, dpa_length)
__array(char, uuid, 16)
@@ -892,16 +946,22 @@ TRACE_EVENT(cxl_poison,
memcpy(__entry->uuid, &cxlr->params.uuid, 16);
__entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd,
__entry->dpa);
+ if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size)
+ __entry->hpa_alias0 = __entry->hpa +
+ cxlr->params.cache_size;
+ else
+ __entry->hpa_alias0 = ULLONG_MAX;
} else {
__assign_str(region);
memset(__entry->uuid, 0, 16);
__entry->hpa = ULLONG_MAX;
+ __entry->hpa_alias0 = ULLONG_MAX;
}
),
TP_printk("memdev=%s host=%s serial=%lld trace_type=%s region=%s " \
- "region_uuid=%pU hpa=0x%llx dpa=0x%llx dpa_length=0x%x " \
- "source=%s flags=%s overflow_time=%llu",
+ "region_uuid=%pU hpa=0x%llx hpa_alias0=0x%llx dpa=0x%llx " \
+ "dpa_length=0x%x source=%s flags=%s overflow_time=%llu",
__get_str(memdev),
__get_str(host),
__entry->serial,
@@ -909,6 +969,7 @@ TRACE_EVENT(cxl_poison,
__get_str(region),
__entry->uuid,
__entry->hpa,
+ __entry->hpa_alias0,
__entry->dpa,
__entry->dpa_length,
show_poison_source(__entry->source),
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index bbbaa0d0a670..be8a7dc77719 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -373,32 +373,6 @@ struct cxl_decoder {
};
/*
- * CXL_DECODER_DEAD prevents endpoints from being reattached to regions
- * while cxld_unregister() is running
- */
-enum cxl_decoder_mode {
- CXL_DECODER_NONE,
- CXL_DECODER_RAM,
- CXL_DECODER_PMEM,
- CXL_DECODER_MIXED,
- CXL_DECODER_DEAD,
-};
-
-static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode)
-{
- static const char * const names[] = {
- [CXL_DECODER_NONE] = "none",
- [CXL_DECODER_RAM] = "ram",
- [CXL_DECODER_PMEM] = "pmem",
- [CXL_DECODER_MIXED] = "mixed",
- };
-
- if (mode >= CXL_DECODER_NONE && mode <= CXL_DECODER_MIXED)
- return names[mode];
- return "mixed";
-}
-
-/*
* Track whether this decoder is reserved for region autodiscovery, or
* free for userspace provisioning.
*/
@@ -412,16 +386,16 @@ enum cxl_decoder_state {
* @cxld: base cxl_decoder_object
* @dpa_res: actively claimed DPA span of this decoder
* @skip: offset into @dpa_res where @cxld.hpa_range maps
- * @mode: which memory type / access-mode-partition this decoder targets
* @state: autodiscovery state
+ * @part: partition index this decoder maps
* @pos: interleave position in @cxld.region
*/
struct cxl_endpoint_decoder {
struct cxl_decoder cxld;
struct resource *dpa_res;
resource_size_t skip;
- enum cxl_decoder_mode mode;
enum cxl_decoder_state state;
+ int part;
int pos;
};
@@ -493,6 +467,7 @@ enum cxl_config_state {
* @res: allocated iomem capacity for this region
* @targets: active ordered targets in current decoder configuration
* @nr_targets: number of targets
+ * @cache_size: extended linear cache size if it exists, otherwise zero.
*
* State transitions are protected by the cxl_region_rwsem
*/
@@ -504,6 +479,12 @@ struct cxl_region_params {
struct resource *res;
struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
int nr_targets;
+ resource_size_t cache_size;
+};
+
+enum cxl_partition_mode {
+ CXL_PARTMODE_RAM,
+ CXL_PARTMODE_PMEM,
};
/*
@@ -525,7 +506,7 @@ struct cxl_region_params {
* struct cxl_region - CXL region
* @dev: This region's device
* @id: This region's id. Id is globally unique across all regions
- * @mode: Endpoint decoder allocation / access mode
+ * @mode: Operational mode of the mapped capacity
* @type: Endpoint decoder target type
* @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown
* @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge
@@ -538,7 +519,7 @@ struct cxl_region_params {
struct cxl_region {
struct device dev;
int id;
- enum cxl_decoder_mode mode;
+ enum cxl_partition_mode mode;
enum cxl_decoder_type type;
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_pmem_region *cxlr_pmem;
@@ -563,6 +544,7 @@ struct cxl_nvdimm {
struct device dev;
struct cxl_memdev *cxlmd;
u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
+ u64 dirty_shutdowns;
};
struct cxl_pmem_region_mapping {
@@ -610,6 +592,7 @@ struct cxl_dax_region {
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
* @pci_latency: Upstream latency in picoseconds
+ * @gpf_dvsec: Cached GPF port DVSEC
*/
struct cxl_port {
struct device dev;
@@ -633,6 +616,7 @@ struct cxl_port {
} cdat;
bool cdat_available;
long pci_latency;
+ int gpf_dvsec;
};
/**
@@ -875,6 +859,7 @@ struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
int cxl_add_to_region(struct cxl_port *root,
struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
+u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
#else
static inline bool is_cxl_pmem_region(struct device *dev)
{
@@ -893,6 +878,11 @@ static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
return NULL;
}
+static inline u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint,
+ u64 spa)
+{
+ return 0;
+}
#endif
void cxl_endpoint_parse_cdat(struct cxl_port *port);
@@ -920,4 +910,6 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#define __mock static
#endif
+u16 cxl_gpf_get_dvsec(struct device *dev, bool is_port);
+
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index dd2b7060d501..3ec6b906371b 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -97,6 +97,19 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped);
+#define CXL_NR_PARTITIONS_MAX 2
+
+struct cxl_dpa_info {
+ u64 size;
+ struct cxl_dpa_part_info {
+ struct range range;
+ enum cxl_partition_mode mode;
+ } part[CXL_NR_PARTITIONS_MAX];
+ int nr_partitions;
+};
+
+int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info);
+
static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
struct cxl_memdev *cxlmd)
{
@@ -373,6 +386,18 @@ struct cxl_dpa_perf {
};
/**
+ * struct cxl_dpa_partition - DPA partition descriptor
+ * @res: shortcut to the partition in the DPA resource tree (cxlds->dpa_res)
+ * @perf: performance attributes of the partition from CDAT
+ * @mode: operation mode for the DPA capacity, e.g. ram, pmem, dynamic...
+ */
+struct cxl_dpa_partition {
+ struct resource res;
+ struct cxl_dpa_perf perf;
+ enum cxl_partition_mode mode;
+};
+
+/**
* struct cxl_dev_state - The driver device state
*
* cxl_dev_state represents the CXL driver/device state. It provides an
@@ -387,8 +412,8 @@ struct cxl_dpa_perf {
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
* @media_ready: Indicate whether the device media is usable
* @dpa_res: Overall DPA resource tree for the device
- * @pmem_res: Active Persistent memory capacity configuration
- * @ram_res: Active Volatile memory capacity configuration
+ * @part: DPA partition array
+ * @nr_partitions: Number of DPA partitions
* @serial: PCIe Device Serial Number
* @type: Generic Memory Class device or Vendor Specific Memory device
* @cxl_mbox: CXL mailbox context
@@ -403,8 +428,8 @@ struct cxl_dev_state {
bool rcd;
bool media_ready;
struct resource dpa_res;
- struct resource pmem_res;
- struct resource ram_res;
+ struct cxl_dpa_partition part[CXL_NR_PARTITIONS_MAX];
+ unsigned int nr_partitions;
u64 serial;
enum cxl_devtype type;
struct cxl_mailbox cxl_mbox;
@@ -413,6 +438,18 @@ struct cxl_dev_state {
#endif
};
+static inline resource_size_t cxl_pmem_size(struct cxl_dev_state *cxlds)
+{
+ /*
+ * Static PMEM may be at partition index 0 when there is no static RAM
+ * capacity.
+ */
+ for (int i = 0; i < cxlds->nr_partitions; i++)
+ if (cxlds->part[i].mode == CXL_PARTMODE_PMEM)
+ return resource_size(&cxlds->part[i].res);
+ return 0;
+}
+
static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
{
return dev_get_drvdata(cxl_mbox->host);
@@ -435,14 +472,11 @@ static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox)
* @partition_align_bytes: alignment size for partition-able capacity
* @active_volatile_bytes: sum of hard + soft volatile
* @active_persistent_bytes: sum of hard + soft persistent
- * @next_volatile_bytes: volatile capacity change pending device reset
- * @next_persistent_bytes: persistent capacity change pending device reset
- * @ram_perf: performance data entry matched to RAM partition
- * @pmem_perf: performance data entry matched to PMEM partition
* @event: event log driver state
* @poison: poison driver state info
* @security: security driver state info
* @fw: firmware upload / activation state
+ * @mce_notifier: MCE notifier
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -457,16 +491,12 @@ struct cxl_memdev_state {
u64 partition_align_bytes;
u64 active_volatile_bytes;
u64 active_persistent_bytes;
- u64 next_volatile_bytes;
- u64 next_persistent_bytes;
-
- struct cxl_dpa_perf ram_perf;
- struct cxl_dpa_perf pmem_perf;
struct cxl_event_state event;
struct cxl_poison_state poison;
struct cxl_security_state security;
struct cxl_fw_state fw;
+ struct notifier_block mce_notifier;
};
static inline struct cxl_memdev_state *
@@ -660,6 +690,23 @@ struct cxl_mbox_set_partition_info {
#define CXL_SET_PARTITION_IMMEDIATE_FLAG BIT(0)
+/* Get Health Info Output Payload CXL 3.2 Spec 8.2.10.9.3.1 Table 8-148 */
+struct cxl_mbox_get_health_info_out {
+ u8 health_status;
+ u8 media_status;
+ u8 additional_status;
+ u8 life_used;
+ __le16 device_temperature;
+ __le32 dirty_shutdown_cnt;
+ __le32 corrected_volatile_error_cnt;
+ __le32 corrected_persistent_error_cnt;
+} __packed;
+
+/* Set Shutdown State Input Payload CXL 3.2 Spec 8.2.10.9.3.5 Table 8-152 */
+struct cxl_mbox_set_shutdown_state_in {
+ u8 state;
+} __packed;
+
/* Set Timestamp CXL 3.0 Spec 8.2.9.4.2 */
struct cxl_mbox_set_timestamp_in {
__le64 timestamp;
@@ -785,7 +832,7 @@ int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
int cxl_dev_state_identify(struct cxl_memdev_state *mds);
int cxl_await_media_ready(struct cxl_dev_state *cxlds);
int cxl_enumerate_cmds(struct cxl_memdev_state *mds);
-int cxl_mem_create_range_info(struct cxl_memdev_state *mds);
+int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info);
struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev);
void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds);
@@ -796,6 +843,8 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt);
+int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count);
+int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds);
int cxl_set_timestamp(struct cxl_memdev_state *mds);
int cxl_poison_state_init(struct cxl_memdev_state *mds);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
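
With ram_res/pmem_res replaced by the part[] array above, per-mode capacity is now derived by walking the partitions, as cxl_pmem_size() does. A sketch of the equivalent volatile-capacity helper, assuming only the cxl_dev_state layout declared in this header (cxl_ram_size() itself is hypothetical here):

static inline resource_size_t cxl_ram_size(struct cxl_dev_state *cxlds)
{
        resource_size_t size = 0;

        for (int i = 0; i < cxlds->nr_partitions; i++)
                if (cxlds->part[i].mode == CXL_PARTMODE_RAM)
                        size += resource_size(&cxlds->part[i].res);
        return size;
}
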
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 4da07727ab9c..54e219b0049e 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -40,6 +40,12 @@
/* CXL 2.0 8.1.6: GPF DVSEC for CXL Port */
#define CXL_DVSEC_PORT_GPF 4
+#define CXL_DVSEC_PORT_GPF_PHASE_1_CONTROL_OFFSET 0x0C
+#define CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK GENMASK(3, 0)
+#define CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK GENMASK(11, 8)
+#define CXL_DVSEC_PORT_GPF_PHASE_2_CONTROL_OFFSET 0xE
+#define CXL_DVSEC_PORT_GPF_PHASE_2_TMO_BASE_MASK GENMASK(3, 0)
+#define CXL_DVSEC_PORT_GPF_PHASE_2_TMO_SCALE_MASK GENMASK(11, 8)
/* CXL 2.0 8.1.7: GPF DVSEC for CXL Device */
#define CXL_DVSEC_DEVICE_GPF 5
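
The new GPF DVSEC offsets and masks above are meant to be consumed with FIELD_GET() on the corresponding control registers. A hedged sketch; the config-space read is illustrative, the dvsec argument is assumed to be the located offset of the port GPF DVSEC, and the timeout encoding itself is not interpreted here:

#include <linux/bitfield.h>
#include <linux/pci.h>

static void show_gpf_phase1_fields(struct pci_dev *pdev, int dvsec)
{
        u16 ctrl;

        pci_read_config_word(pdev,
                             dvsec + CXL_DVSEC_PORT_GPF_PHASE_1_CONTROL_OFFSET,
                             &ctrl);
        dev_dbg(&pdev->dev, "GPF phase1 timeout base=%lu scale=%lu\n",
                FIELD_GET(CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK, ctrl),
                FIELD_GET(CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK, ctrl));
}
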
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 2f03a4d5606e..9675243bd05b 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -152,7 +152,7 @@ static int cxl_mem_probe(struct device *dev)
return -ENXIO;
}
- if (resource_size(&cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) {
+ if (cxl_pmem_size(cxlds) && IS_ENABLED(CONFIG_CXL_PMEM)) {
rc = devm_cxl_add_nvdimm(parent_port, cxlmd);
if (rc) {
if (rc == -ENODEV)
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 993fa60fe453..7b14a154463c 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -903,6 +903,7 @@ __ATTRIBUTE_GROUPS(cxl_rcd);
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
+ struct cxl_dpa_info range_info = { 0 };
struct cxl_memdev_state *mds;
struct cxl_dev_state *cxlds;
struct cxl_register_map map;
@@ -993,7 +994,11 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- rc = cxl_mem_create_range_info(mds);
+ rc = cxl_mem_dpa_fetch(mds, &range_info);
+ if (rc)
+ return rc;
+
+ rc = cxl_dpa_setup(cxlds, &range_info);
if (rc)
return rc;
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index f9c95996e937..d061fe3d2b86 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -42,15 +42,44 @@ static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *
}
static DEVICE_ATTR_RO(id);
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+
+ return sysfs_emit(buf, "%llu\n", cxl_nvd->dirty_shutdowns);
+}
+static DEVICE_ATTR_RO(dirty_shutdown);
+
static struct attribute *cxl_dimm_attributes[] = {
&dev_attr_id.attr,
&dev_attr_provider.attr,
+ &dev_attr_dirty_shutdown.attr,
NULL
};
+#define CXL_INVALID_DIRTY_SHUTDOWN_COUNT ULLONG_MAX
+static umode_t cxl_dimm_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ if (a == &dev_attr_dirty_shutdown.attr) {
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+
+ if (cxl_nvd->dirty_shutdowns ==
+ CXL_INVALID_DIRTY_SHUTDOWN_COUNT)
+ return 0;
+ }
+
+ return a->mode;
+}
+
static const struct attribute_group cxl_dimm_attribute_group = {
.name = "cxl",
.attrs = cxl_dimm_attributes,
+ .is_visible = cxl_dimm_visible
};
static const struct attribute_group *cxl_dimm_attribute_groups[] = {
@@ -58,6 +87,38 @@ static const struct attribute_group *cxl_dimm_attribute_groups[] = {
NULL
};
+static void cxl_nvdimm_arm_dirty_shutdown_tracking(struct cxl_nvdimm *cxl_nvd)
+{
+ struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct device *dev = &cxl_nvd->dev;
+ u32 count;
+
+ /*
+ * Dirty tracking is enabled and exposed to the user only when:
+ * - dirty shutdown on the device can be set, and
+ * - the device has a Device GPF DVSEC (albeit unused), and
+ * - the Get Health Info cmd can retrieve the device's dirty count.
+ */
+ cxl_nvd->dirty_shutdowns = CXL_INVALID_DIRTY_SHUTDOWN_COUNT;
+
+ if (cxl_arm_dirty_shutdown(mds)) {
+ dev_warn(dev, "GPF: could not set dirty shutdown state\n");
+ return;
+ }
+
+ if (!cxl_gpf_get_dvsec(cxlds->dev, false))
+ return;
+
+ if (cxl_get_dirty_count(mds, &count)) {
+ dev_warn(dev, "GPF: could not retrieve dirty count\n");
+ return;
+ }
+
+ cxl_nvd->dirty_shutdowns = count;
+}
+
static int cxl_nvdimm_probe(struct device *dev)
{
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
@@ -78,6 +139,14 @@ static int cxl_nvdimm_probe(struct device *dev)
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+
+ /*
+ * Set dirty shutdown now, with the expectation that the device
+ * clears it upon a successful GPF flow. The exception to this
+ * is upon Viral detection, per CXL 3.2 section 12.4.2.
+ */
+ cxl_nvdimm_arm_dirty_shutdown_tracking(cxl_nvd);
+
nvdimm = __nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd,
cxl_dimm_attribute_groups, flags,
cmd_mask, 0, NULL, cxl_nvd->dev_id,
@@ -375,6 +444,16 @@ static int cxl_pmem_region_probe(struct device *dev)
goto out_nvd;
}
+ if (cxlds->serial == 0) {
+ /* include missing alongside invalid in this error message. */
+ dev_err(dev, "%s: invalid or missing serial number\n",
+ dev_name(&cxlmd->dev));
+ rc = -ENXIO;
+ goto out_nvd;
+ }
+ info[i].serial = cxlds->serial;
+ info[i].offset = m->start;
+
m->cxl_nvd = cxl_nvd;
mappings[i] = (struct nd_mapping_desc) {
.nvdimm = nvdimm,
@@ -382,8 +461,6 @@ static int cxl_pmem_region_probe(struct device *dev)
.size = m->size,
.position = i,
};
- info[i].offset = m->start;
- info[i].serial = cxlds->serial;
}
ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
ndr_desc.mapping = mappings;
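
The dirty_shutdown attribute added above is gated by an is_visible() callback so it disappears when the count could not be initialized. A generic, self-contained sketch of that sysfs pattern, with hypothetical example_* names:

#include <linux/device.h>
#include <linux/sysfs.h>

static bool example_supported;

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
        &dev_attr_example.attr,
        NULL
};

static umode_t example_visible(struct kobject *kobj, struct attribute *a, int n)
{
        /* Returning 0 hides the attribute; a->mode keeps the declared permissions */
        if (a == &dev_attr_example.attr && !example_supported)
                return 0;
        return a->mode;
}

static const struct attribute_group example_group = {
        .attrs = example_attrs,
        .is_visible = example_visible,
};
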
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index b360dca2c69e..bd04980009a4 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1137,10 +1137,7 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
unsigned long payload, buffer_end, transmit_header_bytes = 0;
u32 control;
int count;
- struct {
- struct fw_iso_packet packet;
- u8 header[256];
- } u;
+ DEFINE_RAW_FLEX(struct fw_iso_packet, u, header, 64);
if (ctx == NULL || a->handle != 0)
return -EINVAL;
@@ -1172,29 +1169,29 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
while (p < end) {
if (get_user(control, &p->control))
return -EFAULT;
- u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
- u.packet.interrupt = GET_INTERRUPT(control);
- u.packet.skip = GET_SKIP(control);
- u.packet.tag = GET_TAG(control);
- u.packet.sy = GET_SY(control);
- u.packet.header_length = GET_HEADER_LENGTH(control);
+ u->payload_length = GET_PAYLOAD_LENGTH(control);
+ u->interrupt = GET_INTERRUPT(control);
+ u->skip = GET_SKIP(control);
+ u->tag = GET_TAG(control);
+ u->sy = GET_SY(control);
+ u->header_length = GET_HEADER_LENGTH(control);
switch (ctx->type) {
case FW_ISO_CONTEXT_TRANSMIT:
- if (u.packet.header_length & 3)
+ if (u->header_length & 3)
return -EINVAL;
- transmit_header_bytes = u.packet.header_length;
+ transmit_header_bytes = u->header_length;
break;
case FW_ISO_CONTEXT_RECEIVE:
- if (u.packet.header_length == 0 ||
- u.packet.header_length % ctx->header_size != 0)
+ if (u->header_length == 0 ||
+ u->header_length % ctx->header_size != 0)
return -EINVAL;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- if (u.packet.payload_length == 0 ||
- u.packet.payload_length & 3)
+ if (u->payload_length == 0 ||
+ u->payload_length & 3)
return -EINVAL;
break;
}
@@ -1204,20 +1201,19 @@ static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
if (next > end)
return -EINVAL;
if (copy_from_user
- (u.packet.header, p->header, transmit_header_bytes))
+ (u->header, p->header, transmit_header_bytes))
return -EFAULT;
- if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
- u.packet.header_length + u.packet.payload_length > 0)
+ if (u->skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
+ u->header_length + u->payload_length > 0)
return -EINVAL;
- if (payload + u.packet.payload_length > buffer_end)
+ if (payload + u->payload_length > buffer_end)
return -EINVAL;
- if (fw_iso_context_queue(ctx, &u.packet,
- &client->buffer, payload))
+ if (fw_iso_context_queue(ctx, u, &client->buffer, payload))
break;
p = next;
- payload += u.packet.payload_length;
+ payload += u->payload_length;
count++;
}
fw_iso_context_queue_flush(ctx);
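
The firewire change above replaces an on-stack struct-plus-header-array union with DEFINE_RAW_FLEX() from <linux/overflow.h>, which reserves stack storage sized for a flexible-array member and exposes it through a pointer. A minimal sketch using a hypothetical struct msg:

#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/types.h>

struct msg {
        u16 len;
        u8 payload[];   /* flexible array member */
};

static void build_msg(void)
{
        /* Stack storage for struct msg plus 16 payload bytes; 'm' points at it */
        DEFINE_RAW_FLEX(struct msg, m, payload, 16);

        m->len = 16;
        memset(m->payload, 0, m->len);
}
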
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 42433c19eb30..560724ce21aa 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -1631,6 +1631,7 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
cs_dsp_debugfs_save_wmfwname(dsp, file);
+ ret = 0;
out_fw:
cs_dsp_buf_free(&buf_list);
@@ -2338,6 +2339,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_debugfs_save_binname(dsp, file);
+ ret = 0;
out_fw:
cs_dsp_buf_free(&buf_list);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index b69e68ef3f02..928409199a1a 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -24,7 +24,7 @@
#include <linux/bcd.h>
#include <acpi/ghes.h>
#include <ras/ras_event.h>
-#include "cper_cxl.h"
+#include <cxl/event.h>
/*
* CPER record ID need to be unique even after reboot, because record
@@ -624,11 +624,11 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
else
goto err_section_too_small;
} else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
- struct cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
+ struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
printk("%ssection_type: CXL Protocol Error\n", newpfx);
if (gdata->error_data_length >= sizeof(*prot_err))
- cper_print_prot_err(newpfx, prot_err);
+ cxl_cper_print_prot_err(newpfx, prot_err);
else
goto err_section_too_small;
} else {
diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c
index a55771b99a97..8a7667faf953 100644
--- a/drivers/firmware/efi/cper_cxl.c
+++ b/drivers/firmware/efi/cper_cxl.c
@@ -8,26 +8,7 @@
*/
#include <linux/cper.h>
-#include "cper_cxl.h"
-
-#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
-#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
-#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
-#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
-#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
-#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
-#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
-
-/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
-struct cxl_ras_capability_regs {
- u32 uncor_status;
- u32 uncor_mask;
- u32 uncor_severity;
- u32 cor_status;
- u32 cor_mask;
- u32 cap_control;
- u32 header_log[16];
-};
+#include <cxl/event.h>
static const char * const prot_err_agent_type_strs[] = {
"Restricted CXL Device",
@@ -40,22 +21,8 @@ static const char * const prot_err_agent_type_strs[] = {
"CXL Upstream Switch Port",
};
-/*
- * The layout of the enumeration and the values matches CXL Agent Type
- * field in the UEFI 2.10 Section N.2.13,
- */
-enum {
- RCD, /* Restricted CXL Device */
- RCH_DP, /* Restricted CXL Host Downstream Port */
- DEVICE, /* CXL Device */
- LD, /* CXL Logical Device */
- FMLD, /* CXL Fabric Manager managed Logical Device */
- RP, /* CXL Root Port */
- DSP, /* CXL Downstream Switch Port */
- USP, /* CXL Upstream Switch Port */
-};
-
-void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err)
+void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err)
{
if (prot_err->valid_bits & PROT_ERR_VALID_AGENT_TYPE)
pr_info("%s agent_type: %d, %s\n", pfx, prot_err->agent_type,
diff --git a/drivers/firmware/efi/cper_cxl.h b/drivers/firmware/efi/cper_cxl.h
deleted file mode 100644
index 86bfcf7909ec..000000000000
--- a/drivers/firmware/efi/cper_cxl.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UEFI Common Platform Error Record (CPER) support for CXL Section.
- *
- * Copyright (C) 2022 Advanced Micro Devices, Inc.
- *
- * Author: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
- */
-
-#ifndef LINUX_CPER_CXL_H
-#define LINUX_CPER_CXL_H
-
-/* CXL Protocol Error Section */
-#define CPER_SEC_CXL_PROT_ERR \
- GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
- 0x4B, 0x77, 0x10, 0x48)
-
-#pragma pack(1)
-
-/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
-struct cper_sec_prot_err {
- u64 valid_bits;
- u8 agent_type;
- u8 reserved[7];
-
- /*
- * Except for RCH Downstream Port, all the remaining CXL Agent
- * types are uniquely identified by the PCIe compatible SBDF number.
- */
- union {
- u64 rcrb_base_addr;
- struct {
- u8 function;
- u8 device;
- u8 bus;
- u16 segment;
- u8 reserved_1[3];
- };
- } agent_addr;
-
- struct {
- u16 vendor_id;
- u16 device_id;
- u16 subsystem_vendor_id;
- u16 subsystem_id;
- u8 class_code[2];
- u16 slot;
- u8 reserved_1[4];
- } device_id;
-
- struct {
- u32 lower_dw;
- u32 upper_dw;
- } dev_serial_num;
-
- u8 capability[60];
- u16 dvsec_len;
- u16 err_len;
- u8 reserved_2[4];
-};
-
-#pragma pack()
-
-void cper_print_prot_err(const char *pfx, const struct cper_sec_prot_err *prot_err);
-
-#endif //__CPER_CXL_
diff --git a/drivers/gpu/drm/i915/i915_iosf_mbi.h b/drivers/gpu/drm/i915/i915_iosf_mbi.h
index 8f81b7603d37..317075d0da4e 100644
--- a/drivers/gpu/drm/i915/i915_iosf_mbi.h
+++ b/drivers/gpu/drm/i915/i915_iosf_mbi.h
@@ -31,12 +31,6 @@ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb)
{
return 0;
}
-
-static inline
-int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
-{
- return 0;
-}
#endif
#endif /* __I915_IOSF_MBI_H__ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 1f15990d3934..1d9a42cbc88f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -289,7 +289,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
return -EINVAL;
}
-STACK_FRAME_NON_STANDARD(vmw_send_msg);
+STACK_FRAME_NON_STANDARD_FP(vmw_send_msg);
/**
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index b27791029fa9..b9f4a2937c3a 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -55,6 +55,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
struct inode *inode = new_inode(dir->i_sb);
if (!inode) {
+ dput(dentry);
error = -EPERM;
goto bail;
}
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index 2f2d925a55d7..00c87c0532a6 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -1080,8 +1080,8 @@ static ssize_t cyapa_update_fw_store(struct device *dev,
char fw_name[NAME_MAX];
int ret, error;
- if (count >= NAME_MAX) {
- dev_err(dev, "File name too long\n");
+ if (!count || count >= NAME_MAX) {
+ dev_err(dev, "Bad file name size\n");
return -EINVAL;
}
diff --git a/drivers/input/touchscreen/tsc2007.h b/drivers/input/touchscreen/tsc2007.h
index 69b08dd6c8df..e346fb4f7552 100644
--- a/drivers/input/touchscreen/tsc2007.h
+++ b/drivers/input/touchscreen/tsc2007.h
@@ -19,6 +19,7 @@
#ifndef _TSC2007_H
#define _TSC2007_H
+#include <linux/input/touchscreen.h>
struct gpio_desc;
#define TSC2007_MEASURE_TEMP0 (0x0 << 4)
@@ -63,6 +64,7 @@ struct tsc2007 {
struct i2c_client *client;
+ struct touchscreen_properties prop;
u16 model;
u16 x_plate_ohms;
u16 max_rt;
diff --git a/drivers/input/touchscreen/tsc2007_core.c b/drivers/input/touchscreen/tsc2007_core.c
index 8d832a372b89..5252301686ec 100644
--- a/drivers/input/touchscreen/tsc2007_core.c
+++ b/drivers/input/touchscreen/tsc2007_core.c
@@ -142,8 +142,7 @@ static irqreturn_t tsc2007_soft_irq(int irq, void *handle)
rt = ts->max_rt - rt;
input_report_key(input, BTN_TOUCH, 1);
- input_report_abs(input, ABS_X, tc.x);
- input_report_abs(input, ABS_Y, tc.y);
+ touchscreen_report_pos(input, &ts->prop, tc.x, tc.y, false);
input_report_abs(input, ABS_PRESSURE, rt);
input_sync(input);
@@ -339,9 +338,9 @@ static int tsc2007_probe(struct i2c_client *client)
input_set_drvdata(input_dev, ts);
input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
-
input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, ts->fuzzx, 0);
input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, ts->fuzzy, 0);
+ touchscreen_parse_properties(input_dev, false, &ts->prop);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT,
ts->fuzzz, 0);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 0b1870a09e1f..06f809e70f15 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -267,6 +267,7 @@ config DM_CRYPT
depends on BLK_DEV_DM
depends on (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n)
depends on (TRUSTED_KEYS || TRUSTED_KEYS=n)
+ select CRC32
select CRYPTO
select CRYPTO_CBC
select CRYPTO_ESSIV
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index aab8240429b0..9c8ed65cd87e 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -2234,7 +2234,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
-static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
+static void forget_buffer(struct dm_bufio_client *c, sector_t block)
{
struct dm_buffer *b;
@@ -2249,8 +2249,6 @@ static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
cache_put_and_wake(c, b);
}
}
-
- return b ? true : false;
}
/*
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9cb797a561d6..a10d75a562db 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -406,6 +406,12 @@ struct cache {
mempool_t migration_pool;
struct bio_set bs;
+
+ /*
+ * Cache_size entries. Set bits indicate blocks mapped beyond the
+ * target length, which are marked for invalidation.
+ */
+ unsigned long *invalid_bitset;
};
struct per_bio_data {
@@ -1922,6 +1928,9 @@ static void __destroy(struct cache *cache)
if (cache->discard_bitset)
free_bitset(cache->discard_bitset);
+ if (cache->invalid_bitset)
+ free_bitset(cache->invalid_bitset);
+
if (cache->copier)
dm_kcopyd_client_destroy(cache->copier);
@@ -2510,6 +2519,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
}
clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
+ cache->invalid_bitset = alloc_bitset(from_cblock(cache->cache_size));
+ if (!cache->invalid_bitset) {
+ *error = "could not allocate bitset for invalid blocks";
+ goto bad;
+ }
+ clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size));
+
cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(cache->copier)) {
*error = "could not create kcopyd client";
@@ -2808,6 +2824,24 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
}
+static int load_filtered_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
+ bool dirty, uint32_t hint, bool hint_valid)
+{
+ struct cache *cache = context;
+
+ if (from_oblock(oblock) >= from_oblock(cache->origin_blocks)) {
+ if (dirty) {
+ DMERR("%s: unable to shrink origin; cache block %u is dirty",
+ cache_device_name(cache), from_cblock(cblock));
+ return -EFBIG;
+ }
+ set_bit(from_cblock(cblock), cache->invalid_bitset);
+ return 0;
+ }
+
+ return load_mapping(context, oblock, cblock, dirty, hint, hint_valid);
+}
+
/*
* The discard block size in the on disk metadata is not
* necessarily the same as we're currently using. So we have to
@@ -2899,6 +2933,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
return to_cblock(size);
}
+static bool can_resume(struct cache *cache)
+{
+ /*
+ * Disallow retrying the resume operation for devices that failed the
+ * first resume attempt, as the failure leaves the policy object partially
+ * initialized. Retrying could trigger BUG_ON when loading cache mappings
+ * into the incomplete policy object.
+ */
+ if (cache->sized && !cache->loaded_mappings) {
+ if (get_cache_mode(cache) != CM_WRITE)
+ DMERR("%s: unable to resume a failed-loaded cache, please check metadata.",
+ cache_device_name(cache));
+ else
+ DMERR("%s: unable to resume cache due to missing proper cache table reload",
+ cache_device_name(cache));
+ return false;
+ }
+
+ return true;
+}
+
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
@@ -2941,12 +2996,33 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
return 0;
}
+static int truncate_oblocks(struct cache *cache)
+{
+ uint32_t nr_blocks = from_cblock(cache->cache_size);
+ uint32_t i;
+ int r;
+
+ for_each_set_bit(i, cache->invalid_bitset, nr_blocks) {
+ r = dm_cache_remove_mapping(cache->cmd, to_cblock(i));
+ if (r) {
+ DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
+ cache_device_name(cache));
+ return r;
+ }
+ }
+
+ return 0;
+}
+
static int cache_preresume(struct dm_target *ti)
{
int r = 0;
struct cache *cache = ti->private;
dm_cblock_t csize = get_cache_dev_size(cache);
+ if (!can_resume(cache))
+ return -EINVAL;
+
/*
* Check to see if the cache has resized.
*/
@@ -2962,11 +3038,25 @@ static int cache_preresume(struct dm_target *ti)
}
if (!cache->loaded_mappings) {
+ /*
+ * The fast device could have been resized since the last
+ * failed preresume attempt. To be safe, we start with a blank
+ * bitset for cache blocks.
+ */
+ clear_bitset(cache->invalid_bitset, from_cblock(cache->cache_size));
+
r = dm_cache_load_mappings(cache->cmd, cache->policy,
- load_mapping, cache);
+ load_filtered_mapping, cache);
if (r) {
DMERR("%s: could not load cache mappings", cache_device_name(cache));
- metadata_operation_failed(cache, "dm_cache_load_mappings", r);
+ if (r != -EFBIG)
+ metadata_operation_failed(cache, "dm_cache_load_mappings", r);
+ return r;
+ }
+
+ r = truncate_oblocks(cache);
+ if (r) {
+ metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
return r;
}
@@ -3426,7 +3516,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {2, 2, 0},
+ .version = {2, 3, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
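The dm-cache changes above follow a two-pass pattern: while mappings are loaded, any cache block that points past the shrunken origin is only recorded in invalid_bitset (unless it is dirty, which aborts the load), and once the load succeeds truncate_oblocks() sweeps the bitset and removes those mappings from the on-disk metadata. A minimal sketch of that pattern, using the kernel bitmap helpers but with a hypothetical remove() callback standing in for dm_cache_remove_mapping():

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

struct shrink_ctx {
    unsigned long *invalid;                 /* one bit per cache block */
    unsigned int nr_cblocks;                /* number of cache blocks */
    unsigned long long origin_blocks;       /* new (smaller) origin size */
    int (*remove)(void *md, unsigned int cblock);   /* hypothetical metadata hook */
    void *md;
};

/* Pass 1: called per mapping; defer out-of-range clean blocks, refuse dirty ones. */
static int note_mapping(struct shrink_ctx *ctx, unsigned long long oblock,
                        unsigned int cblock, bool dirty)
{
    if (oblock >= ctx->origin_blocks) {
        if (dirty)
            return -EFBIG;          /* cannot silently drop dirty data */
        set_bit(cblock, ctx->invalid);
    }
    return 0;
}

/* Pass 2: drop every deferred mapping from the metadata. */
static int sweep_invalid(struct shrink_ctx *ctx)
{
    unsigned int cblock;
    int r;

    for_each_set_bit(cblock, ctx->invalid, ctx->nr_cblocks) {
        r = ctx->remove(ctx->md, cblock);
        if (r)
            return r;
    }
    return 0;
}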
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 02a2919f4e5a..9dfdb63220d7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -17,6 +17,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
+#include <linux/crc32.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
@@ -125,7 +126,6 @@ struct iv_lmk_private {
#define TCW_WHITENING_SIZE 16
struct iv_tcw_private {
- struct crypto_shash *crc32_tfm;
u8 *iv_seed;
u8 *whitening;
};
@@ -607,10 +607,6 @@ static void crypt_iv_tcw_dtr(struct crypt_config *cc)
tcw->iv_seed = NULL;
kfree_sensitive(tcw->whitening);
tcw->whitening = NULL;
-
- if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
- crypto_free_shash(tcw->crc32_tfm);
- tcw->crc32_tfm = NULL;
}
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -628,13 +624,6 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
return -EINVAL;
}
- tcw->crc32_tfm = crypto_alloc_shash("crc32", 0,
- CRYPTO_ALG_ALLOCATES_MEMORY);
- if (IS_ERR(tcw->crc32_tfm)) {
- ti->error = "Error initializing CRC32 in TCW";
- return PTR_ERR(tcw->crc32_tfm);
- }
-
tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
if (!tcw->iv_seed || !tcw->whitening) {
@@ -668,36 +657,28 @@ static int crypt_iv_tcw_wipe(struct crypt_config *cc)
return 0;
}
-static int crypt_iv_tcw_whitening(struct crypt_config *cc,
- struct dm_crypt_request *dmreq,
- u8 *data)
+static void crypt_iv_tcw_whitening(struct crypt_config *cc,
+ struct dm_crypt_request *dmreq, u8 *data)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
__le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 buf[TCW_WHITENING_SIZE];
- SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
- int i, r;
+ int i;
/* xor whitening with sector number */
crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
/* calculate crc32 for every 32bit part and xor it */
- desc->tfm = tcw->crc32_tfm;
- for (i = 0; i < 4; i++) {
- r = crypto_shash_digest(desc, &buf[i * 4], 4, &buf[i * 4]);
- if (r)
- goto out;
- }
+ for (i = 0; i < 4; i++)
+ put_unaligned_le32(crc32(0, &buf[i * 4], 4), &buf[i * 4]);
crypto_xor(&buf[0], &buf[12], 4);
crypto_xor(&buf[4], &buf[8], 4);
/* apply whitening (8 bytes) to whole sector */
for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
crypto_xor(data + i * 8, buf, 8);
-out:
memzero_explicit(buf, sizeof(buf));
- return r;
}
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
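The TCW whitening above now calls the crc32() library function directly instead of round-tripping through a crypto_shash, which also removes the only failure path and lets the helper return void. For reference, the kernel crc32()/crc32_le() convention is that the caller supplies the seed (0 in this code) and no implicit pre- or post-inversion is applied; the sketch below mirrors the fold with a plain bitwise CRC so it can be checked in user space. It is illustrative only, not the driver code.

#include <stdint.h>
#include <stddef.h>

/* Bitwise reflected CRC-32 (poly 0xEDB88320), matching the crc32_le(seed, buf, len)
 * convention: the seed is used as-is and no final inversion is applied. */
static uint32_t crc32_le_ref(uint32_t crc, const uint8_t *buf, size_t len)
{
    while (len--) {
        crc ^= *buf++;
        for (int bit = 0; bit < 8; bit++)
            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
    }
    return crc;
}

/* The whitening fold from crypt_iv_tcw_whitening(): replace each 32-bit word of
 * the 16-byte buffer with its CRC-32, stored little-endian, then xor-fold the
 * outer and inner halves. */
static void whitening_fold(uint8_t buf[16])
{
    for (int i = 0; i < 4; i++) {
        uint32_t c = crc32_le_ref(0, &buf[i * 4], 4);

        /* equivalent of put_unaligned_le32() */
        buf[i * 4 + 0] = c & 0xff;
        buf[i * 4 + 1] = (c >> 8) & 0xff;
        buf[i * 4 + 2] = (c >> 16) & 0xff;
        buf[i * 4 + 3] = (c >> 24) & 0xff;
    }
    for (int i = 0; i < 4; i++) {
        buf[i] ^= buf[12 + i];      /* crypto_xor(&buf[0], &buf[12], 4) */
        buf[4 + i] ^= buf[8 + i];   /* crypto_xor(&buf[4], &buf[8], 4) */
    }
}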
@@ -707,13 +688,12 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
__le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 *src;
- int r = 0;
/* Remove whitening from ciphertext */
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
src = kmap_local_page(sg_page(sg));
- r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
+ crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
kunmap_local(src);
}
@@ -723,7 +703,7 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
cc->iv_size - 8);
- return r;
+ return 0;
}
static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
@@ -731,7 +711,6 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
{
struct scatterlist *sg;
u8 *dst;
- int r;
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
return 0;
@@ -739,10 +718,10 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
/* Apply whitening on ciphertext */
sg = crypt_get_sg_data(cc, dmreq->sg_out);
dst = kmap_local_page(sg_page(sg));
- r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
+ crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
kunmap_local(dst);
- return r;
+ return 0;
}
static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 08f6387620c1..d4cf0ac2a7aa 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -369,6 +369,21 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
return delay_bio(dc, c, bio);
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int delay_report_zones(struct dm_target *ti,
+ struct dm_report_zones_args *args, unsigned int nr_zones)
+{
+ struct delay_c *dc = ti->private;
+ struct delay_class *c = &dc->read;
+
+ return dm_report_zones(c->dev->bdev, c->start,
+ c->start + dm_target_offset(ti, args->next_sector),
+ args, nr_zones);
+}
+#else
+#define delay_report_zones NULL
+#endif
+
#define DMEMIT_DELAY_CLASS(c) \
DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
@@ -424,11 +439,12 @@ out:
static struct target_type delay_target = {
.name = "delay",
.version = {1, 4, 0},
- .features = DM_TARGET_PASSES_INTEGRITY,
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
.map = delay_map,
+ .report_zones = delay_report_zones,
.presuspend = delay_presuspend,
.resume = delay_resume,
.status = delay_status,
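delay_report_zones() above is pure sector bookkeeping: it forwards the request to the read class's backing device, rebasing the target-relative position onto that device with c->start + dm_target_offset(ti, ...). A toy illustration of the remapping arithmetic, with hypothetical names:

#include <stdint.h>

typedef uint64_t sector_t;

/* ti_begin: the target's first sector in the dm table;
 * dev_start: where the target's data begins on the backing device. */
static sector_t remap_to_backing(sector_t pos, sector_t ti_begin, sector_t dev_start)
{
    return dev_start + (pos - ti_begin);    /* dev_start + dm_target_offset(ti, pos) */
}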
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 18ae45dcbfb2..b19b0142a690 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -390,6 +390,12 @@ static int ebs_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+static void ebs_postsuspend(struct dm_target *ti)
+{
+ struct ebs_c *ec = ti->private;
+ dm_bufio_client_reset(ec->bufio);
+}
+
static void ebs_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result, unsigned int maxlen)
{
@@ -447,6 +453,7 @@ static struct target_type ebs_target = {
.ctr = ebs_ctr,
.dtr = ebs_dtr,
.map = ebs_map,
+ .postsuspend = ebs_postsuspend,
.status = ebs_status,
.io_hints = ebs_io_hints,
.prepare_ioctl = ebs_prepare_ioctl,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c8c1a00e7d80..8b219b1199b4 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -21,6 +21,7 @@
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
+#include <crypto/utils.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>
@@ -516,7 +517,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
dm_integrity_io_error(ic, "crypto_shash_digest", r);
return r;
}
- if (memcmp(mac, actual_mac, mac_size)) {
+ if (crypto_memneq(mac, actual_mac, mac_size)) {
dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
return -EILSEQ;
@@ -859,7 +860,7 @@ static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool
if (likely(wr))
memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
else {
- if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
+ if (crypto_memneq(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
dm_integrity_io_error(ic, "journal mac", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
}
@@ -1401,10 +1402,9 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
unsigned int *metadata_offset, unsigned int total_size, int op)
{
-#define MAY_BE_FILLER 1
-#define MAY_BE_HASH 2
unsigned int hash_offset = 0;
- unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+ unsigned char mismatch_hash = 0;
+ unsigned char mismatch_filler = !ic->discard;
do {
unsigned char *data, *dp;
@@ -1425,7 +1425,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
if (op == TAG_READ) {
memcpy(tag, dp, to_copy);
} else if (op == TAG_WRITE) {
- if (memcmp(dp, tag, to_copy)) {
+ if (crypto_memneq(dp, tag, to_copy)) {
memcpy(dp, tag, to_copy);
dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
}
@@ -1433,29 +1433,30 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
/* e.g.: op == TAG_CMP */
if (likely(is_power_of_2(ic->tag_size))) {
- if (unlikely(memcmp(dp, tag, to_copy)))
- if (unlikely(!ic->discard) ||
- unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
- goto thorough_test;
- }
+ if (unlikely(crypto_memneq(dp, tag, to_copy)))
+ goto thorough_test;
} else {
unsigned int i, ts;
thorough_test:
ts = total_size;
for (i = 0; i < to_copy; i++, ts--) {
- if (unlikely(dp[i] != tag[i]))
- may_be &= ~MAY_BE_HASH;
- if (likely(dp[i] != DISCARD_FILLER))
- may_be &= ~MAY_BE_FILLER;
+ /*
+ * Warning: the control flow must not be
+ * dependent on match/mismatch of
+ * individual bytes.
+ */
+ mismatch_hash |= dp[i] ^ tag[i];
+ mismatch_filler |= dp[i] ^ DISCARD_FILLER;
hash_offset++;
if (unlikely(hash_offset == ic->tag_size)) {
- if (unlikely(!may_be)) {
+ if (unlikely(mismatch_hash) && unlikely(mismatch_filler)) {
dm_bufio_release(b);
return ts;
}
hash_offset = 0;
- may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+ mismatch_hash = 0;
+ mismatch_filler = !ic->discard;
}
}
}
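The rewritten TAG_CMP loop above follows the rule spelled out in its comment: byte differences are OR-accumulated and the accumulators are tested only at tag boundaries, so execution never branches on which byte mismatched, mirroring what crypto_memneq() does for the other comparisons in this patch. A user-space sketch of the idiom (illustrative only; the in-kernel crypto_memneq() additionally uses barriers so the compiler cannot turn the loop back into an early-exit memcmp):

#include <stddef.h>

/* Returns nonzero if the buffers differ; runs in time independent of the
 * position of the first differing byte. */
static unsigned char ct_memneq(const unsigned char *a, const unsigned char *b,
                               size_t len)
{
    unsigned char diff = 0;

    for (size_t i = 0; i < len; i++)
        diff |= a[i] ^ b[i];    /* no early exit, no data-dependent branch */
    return diff;
}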
@@ -1476,8 +1477,6 @@ thorough_test:
} while (unlikely(total_size));
return 0;
-#undef MAY_BE_FILLER
-#undef MAY_BE_HASH
}
struct flush_request {
@@ -2076,7 +2075,7 @@ retry_kmap:
char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
- if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
+ if (unlikely(crypto_memneq(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
logical_sector);
dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
@@ -2595,7 +2594,7 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
bio_put(outgoing_bio);
integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest);
- if (unlikely(memcmp(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
+ if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
ic->dev->bdev, dio->bio_details.bi_iter.bi_sector);
atomic64_inc(&ic->number_of_mismatches);
@@ -2634,7 +2633,7 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status
char *mem = bvec_kmap_local(&bv);
//memset(mem, 0xff, ic->sectors_per_block << SECTOR_SHIFT);
integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest);
- if (unlikely(memcmp(digest, dio->integrity_payload + pos,
+ if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
kunmap_local(mem);
dm_integrity_free_payload(dio);
@@ -2911,7 +2910,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
(char *)access_journal_data(ic, i, l), test_tag);
- if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
+ if (unlikely(crypto_memneq(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
}
@@ -5072,16 +5071,19 @@ try_smaller_buffer:
ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
if (!ic->recalc_bitmap) {
+ ti->error = "Could not allocate memory for bitmap";
r = -ENOMEM;
goto bad;
}
ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
if (!ic->may_write_bitmap) {
+ ti->error = "Could not allocate memory for bitmap";
r = -ENOMEM;
goto bad;
}
ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
if (!ic->bbs) {
+ ti->error = "Could not allocate memory for bitmap";
r = -ENOMEM;
goto bad;
}
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 3786ac67cefe..a1b7535c508a 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -467,7 +467,7 @@ static struct target_type stripe_target = {
.name = "striped",
.version = {1, 7, 0},
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
- DM_TARGET_ATOMIC_WRITES,
+ DM_TARGET_ATOMIC_WRITES | DM_TARGET_PASSES_CRYPTO,
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 453803f1edf5..35100a435c88 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -697,6 +697,10 @@ int dm_table_add_target(struct dm_table *t, const char *type,
DMERR("%s: zero-length target", dm_device_name(t->md));
return -EINVAL;
}
+ if (start + len < start || start + len > LLONG_MAX >> SECTOR_SHIFT) {
+ DMERR("%s: too large device", dm_device_name(t->md));
+ return -EINVAL;
+ }
ti->type = dm_get_target_type(type);
if (!ti->type) {
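The new dm_table_add_target() check rejects a target whose start + len wraps around in sector_t, or whose end, once converted to bytes, would overflow a signed 64-bit offset (hence the LLONG_MAX >> SECTOR_SHIFT bound). The same guard in isolation, as a small sketch:

#include <stdbool.h>
#include <stdint.h>
#include <limits.h>

#define SECTOR_SHIFT 9  /* 512-byte sectors, as in the kernel */

/* Reject ranges that wrap or exceed what fits in a signed 64-bit byte offset. */
static bool sector_range_valid(uint64_t start, uint64_t len)
{
    if (start + len < start)                                    /* unsigned wrap-around */
        return false;
    if (start + len > ((uint64_t)LLONG_MAX >> SECTOR_SHIFT))    /* too large in bytes */
        return false;
    return true;
}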
diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c
index 89cb7942ec5c..baf683cabb1b 100644
--- a/drivers/md/dm-vdo/block-map.c
+++ b/drivers/md/dm-vdo/block-map.c
@@ -451,7 +451,7 @@ static struct page_info * __must_check find_page(struct vdo_page_cache *cache,
* select_lru_page() - Determine which page is least recently used.
*
* Picks the least recently used from among the non-busy entries at the front of each of the lru
- * ring. Since whenever we mark a page busy we also put it to the end of the ring it is unlikely
+ * list. Since whenever we mark a page busy we also move it to the end of the list, it is unlikely
* that the entries at the front are busy unless the queue is very short, but not impossible.
*
* Return: A pointer to the info structure for a relevant page, or NULL if no such page can be
@@ -1544,7 +1544,7 @@ static void write_page_if_not_dirtied(struct vdo_waiter *waiter, void *context)
static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio)
{
- return_vio_to_pool(zone->vio_pool, vio);
+ return_vio_to_pool(vio);
check_for_drain_complete(zone);
}
@@ -1837,7 +1837,7 @@ static void finish_block_map_page_load(struct vdo_completion *completion)
if (!vdo_copy_valid_page(vio->data, nonce, pbn, page))
vdo_format_block_map_page(page, nonce, pbn, false);
- return_vio_to_pool(zone->vio_pool, pooled);
+ return_vio_to_pool(pooled);
/* Release our claim to the load and wake any waiters */
release_page_lock(data_vio, "load");
@@ -1851,10 +1851,9 @@ static void handle_io_error(struct vdo_completion *completion)
struct vio *vio = as_vio(completion);
struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
struct data_vio *data_vio = completion->parent;
- struct block_map_zone *zone = pooled->context;
vio_record_metadata_io_error(vio);
- return_vio_to_pool(zone->vio_pool, pooled);
+ return_vio_to_pool(pooled);
abort_load(data_vio, result);
}
@@ -2499,7 +2498,7 @@ static void finish_cursor(struct cursor *cursor)
struct cursors *cursors = cursor->parent;
struct vdo_completion *completion = cursors->completion;
- return_vio_to_pool(cursors->pool, vdo_forget(cursor->vio));
+ return_vio_to_pool(vdo_forget(cursor->vio));
if (--cursors->active_roots > 0)
return;
@@ -2746,7 +2745,7 @@ static int __must_check initialize_block_map_zone(struct block_map *map,
if (result != VDO_SUCCESS)
return result;
- result = make_vio_pool(vdo, BLOCK_MAP_VIO_POOL_SIZE,
+ result = make_vio_pool(vdo, BLOCK_MAP_VIO_POOL_SIZE, 1,
zone->thread_id, VIO_TYPE_BLOCK_MAP_INTERIOR,
VIO_PRIORITY_METADATA, zone, &zone->vio_pool);
if (result != VDO_SUCCESS)
diff --git a/drivers/md/dm-vdo/constants.h b/drivers/md/dm-vdo/constants.h
index a8c4d6e24b38..2a8b03779f87 100644
--- a/drivers/md/dm-vdo/constants.h
+++ b/drivers/md/dm-vdo/constants.h
@@ -44,9 +44,6 @@ enum {
/* The default size of each slab journal, in blocks */
DEFAULT_VDO_SLAB_JOURNAL_SIZE = 224,
- /* Unit test minimum */
- MINIMUM_VDO_SLAB_JOURNAL_BLOCKS = 2,
-
/*
* The initial size of lbn_operations and pbn_operations, which is based upon the expected
* maximum number of outstanding VIOs. This value was chosen to make it highly unlikely
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index 3f3d29af1be4..5c49d49e023c 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -226,7 +226,7 @@ struct hash_lock {
* A list containing the data VIOs sharing this lock, all having the same record name and
* data block contents, linked by their hash_lock_node fields.
*/
- struct list_head duplicate_ring;
+ struct list_head duplicate_vios;
/* The number of data_vios sharing this lock instance */
data_vio_count_t reference_count;
@@ -343,7 +343,7 @@ static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *l
{
memset(lock, 0, sizeof(*lock));
INIT_LIST_HEAD(&lock->pool_node);
- INIT_LIST_HEAD(&lock->duplicate_ring);
+ INIT_LIST_HEAD(&lock->duplicate_vios);
vdo_waitq_init(&lock->waiters);
list_add_tail(&lock->pool_node, &zone->lock_pool);
}
@@ -441,7 +441,7 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
VDO_ASSERT_LOG_ONLY(data_vio->hash_zone != NULL,
"must have a hash zone when holding a hash lock");
VDO_ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry),
- "must be on a hash lock ring when holding a hash lock");
+ "must be on a hash lock list when holding a hash lock");
VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 0,
"hash lock reference must be counted");
@@ -464,10 +464,10 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
if (new_lock != NULL) {
/*
- * Keep all data_vios sharing the lock on a ring since they can complete in any
+ * Keep all data_vios sharing the lock on a list since they can complete in any
* order and we'll always need a pointer to one to compare data.
*/
- list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_ring);
+ list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_vios);
new_lock->reference_count += 1;
if (new_lock->max_references < new_lock->reference_count)
new_lock->max_references = new_lock->reference_count;
@@ -1789,10 +1789,10 @@ static bool is_hash_collision(struct hash_lock *lock, struct data_vio *candidate
struct hash_zone *zone;
bool collides;
- if (list_empty(&lock->duplicate_ring))
+ if (list_empty(&lock->duplicate_vios))
return false;
- lock_holder = list_first_entry(&lock->duplicate_ring, struct data_vio,
+ lock_holder = list_first_entry(&lock->duplicate_vios, struct data_vio,
hash_lock_entry);
zone = candidate->hash_zone;
collides = !blocks_equal(lock_holder->vio.data, candidate->vio.data);
@@ -1815,7 +1815,7 @@ static inline int assert_hash_lock_preconditions(const struct data_vio *data_vio
return result;
result = VDO_ASSERT(list_empty(&data_vio->hash_lock_entry),
- "must not already be a member of a hash lock ring");
+ "must not already be a member of a hash lock list");
if (result != VDO_SUCCESS)
return result;
@@ -1942,8 +1942,8 @@ void vdo_release_hash_lock(struct data_vio *data_vio)
"returned hash lock must not be in use with state %s",
get_hash_lock_state_name(lock->state));
VDO_ASSERT_LOG_ONLY(list_empty(&lock->pool_node),
- "hash lock returned to zone must not be in a pool ring");
- VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring),
+ "hash lock returned to zone must not be in a pool list");
+ VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_vios),
"hash lock returned to zone must not reference DataVIOs");
return_hash_lock_to_pool(zone, lock);
diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c
index 100e92f8f866..b7cc0f41caca 100644
--- a/drivers/md/dm-vdo/encodings.c
+++ b/drivers/md/dm-vdo/encodings.c
@@ -711,24 +711,11 @@ int vdo_configure_slab(block_count_t slab_size, block_count_t slab_journal_block
ref_blocks = vdo_get_saved_reference_count_size(slab_size - slab_journal_blocks);
meta_blocks = (ref_blocks + slab_journal_blocks);
- /* Make sure test code hasn't configured slabs to be too small. */
+ /* Make sure configured slabs are not too small. */
if (meta_blocks >= slab_size)
return VDO_BAD_CONFIGURATION;
- /*
- * If the slab size is very small, assume this must be a unit test and override the number
- * of data blocks to be a power of two (wasting blocks in the slab). Many tests need their
- * data_blocks fields to be the exact capacity of the configured volume, and that used to
- * fall out since they use a power of two for the number of data blocks, the slab size was
- * a power of two, and every block in a slab was a data block.
- *
- * TODO: Try to figure out some way of structuring testParameters and unit tests so this
- * hack isn't needed without having to edit several unit tests every time the metadata size
- * changes by one block.
- */
data_blocks = slab_size - meta_blocks;
- if ((slab_size < 1024) && !is_power_of_2(data_blocks))
- data_blocks = ((block_count_t) 1 << ilog2(data_blocks));
/*
* Configure the slab journal thresholds. The flush threshold is 168 of 224 blocks in
@@ -1221,11 +1208,6 @@ int vdo_validate_config(const struct vdo_config *config,
if (result != VDO_SUCCESS)
return result;
- result = VDO_ASSERT(config->slab_journal_blocks >= MINIMUM_VDO_SLAB_JOURNAL_BLOCKS,
- "slab journal size meets minimum size");
- if (result != VDO_SUCCESS)
- return result;
-
result = VDO_ASSERT(config->slab_journal_blocks <= config->slab_size,
"slab journal size is within expected bound");
if (result != VDO_SUCCESS)
diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c
index af8fab83b0f3..61edf2b72427 100644
--- a/drivers/md/dm-vdo/indexer/index-layout.c
+++ b/drivers/md/dm-vdo/indexer/index-layout.c
@@ -54,7 +54,6 @@
* Each save also has a unique nonce.
*/
-#define MAGIC_SIZE 32
#define NONCE_INFO_SIZE 32
#define MAX_SAVES 2
@@ -98,9 +97,11 @@ enum region_type {
#define SUPER_VERSION_CURRENT 3
#define SUPER_VERSION_MAXIMUM 7
-static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
+static const u8 LAYOUT_MAGIC[] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */
+#define MAGIC_SIZE (sizeof(LAYOUT_MAGIC) - 1)
+
struct region_header {
u64 magic;
u64 region_blocks;
diff --git a/drivers/md/dm-vdo/indexer/index-session.c b/drivers/md/dm-vdo/indexer/index-session.c
index aee0914d604a..aa575a24e0b2 100644
--- a/drivers/md/dm-vdo/indexer/index-session.c
+++ b/drivers/md/dm-vdo/indexer/index-session.c
@@ -100,7 +100,6 @@ static int get_index_session(struct uds_index_session *index_session)
int uds_launch_request(struct uds_request *request)
{
- size_t internal_size;
int result;
if (request->callback == NULL) {
@@ -121,10 +120,7 @@ int uds_launch_request(struct uds_request *request)
}
/* Reset all internal fields before processing. */
- internal_size =
- sizeof(struct uds_request) - offsetof(struct uds_request, zone_number);
- // FIXME should be using struct_group for this instead
- memset((char *) request + sizeof(*request) - internal_size, 0, internal_size);
+ memset(&request->internal, 0, sizeof(request->internal));
result = get_index_session(request->session);
if (result != UDS_SUCCESS)
diff --git a/drivers/md/dm-vdo/indexer/indexer.h b/drivers/md/dm-vdo/indexer/indexer.h
index 183a94eb7e92..7c1fc4577f5b 100644
--- a/drivers/md/dm-vdo/indexer/indexer.h
+++ b/drivers/md/dm-vdo/indexer/indexer.h
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/sched.h>
+#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -73,7 +74,7 @@ enum uds_request_type {
/* Remove any mapping for a name. */
UDS_DELETE,
-};
+} __packed;
enum uds_open_index_type {
/* Create a new index. */
@@ -226,7 +227,7 @@ struct uds_zone_message {
enum uds_zone_message_type type;
/* The virtual chapter number to which the message applies */
u64 virtual_chapter;
-};
+} __packed;
struct uds_index_session;
struct uds_index;
@@ -253,34 +254,32 @@ struct uds_request {
/* The existing data associated with the request name, if any */
struct uds_record_data old_metadata;
- /* Either UDS_SUCCESS or an error code for the request */
- int status;
/* True if the record name had an existing entry in the index */
bool found;
+ /* Either UDS_SUCCESS or an error code for the request */
+ int status;
- /*
- * The remaining fields are used internally and should not be altered by clients. The index
- * relies on zone_number being the first field in this section.
- */
-
- /* The number of the zone which will process this request*/
- unsigned int zone_number;
- /* A link for adding a request to a lock-free queue */
- struct funnel_queue_entry queue_link;
- /* A link for adding a request to a standard linked list */
- struct uds_request *next_request;
- /* A pointer to the index processing this request */
- struct uds_index *index;
- /* Control message for coordinating between zones */
- struct uds_zone_message zone_message;
- /* If true, process request immediately by waking the worker thread */
- bool unbatched;
- /* If true, continue this request before processing newer requests */
- bool requeued;
- /* The virtual chapter containing the record name, if known */
- u64 virtual_chapter;
- /* The region of the index containing the record name */
- enum uds_index_region location;
+ /* The remaining fields are used internally and should not be altered by clients. */
+ struct_group(internal,
+ /* The virtual chapter containing the record name, if known */
+ u64 virtual_chapter;
+ /* The region of the index containing the record name */
+ enum uds_index_region location;
+ /* If true, process request immediately by waking the worker thread */
+ bool unbatched;
+ /* If true, continue this request before processing newer requests */
+ bool requeued;
+ /* Control message for coordinating between zones */
+ struct uds_zone_message zone_message;
+ /* The number of the zone which will process this request */
+ unsigned int zone_number;
+ /* A link for adding a request to a lock-free queue */
+ struct funnel_queue_entry queue_link;
+ /* A link for adding a request to a standard linked list */
+ struct uds_request *next_request;
+ /* A pointer to the index processing this request */
+ struct uds_index *index;
+ );
};
/* A session is required for most index operations. */
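The uds_request rework above groups the internal fields with struct_group(), so uds_launch_request() can clear them with a plain memset(&request->internal, ...) instead of offsetof() arithmetic that depended on zone_number staying first. struct_group() places the members in an anonymous union as both an anonymous struct (so they are still addressed directly) and a named sub-struct. A simplified stand-alone equivalent of the layout trick, with made-up field names (the real kernel macro also takes a tag and attributes):

#include <stdint.h>
#include <string.h>

/* Simplified struct_group(): the same members exist both as direct fields
 * and as a single addressable sub-struct named NAME. */
#define GROUP(NAME, ...)                    \
    union {                                 \
        struct { __VA_ARGS__ };             \
        struct { __VA_ARGS__ } NAME;        \
    }

struct request_like {
    uint64_t record_name;   /* client-visible fields stay outside the group */
    int status;

    GROUP(internal,
        uint64_t virtual_chapter;
        unsigned int zone_number;
        void *index;
    );
};

static void reset_internal(struct request_like *req)
{
    /* One memset covers exactly the grouped fields, no offsetof() math. */
    memset(&req->internal, 0, sizeof(req->internal));
}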
diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c
index 421e5436c32c..11d47770b54d 100644
--- a/drivers/md/dm-vdo/io-submitter.c
+++ b/drivers/md/dm-vdo/io-submitter.c
@@ -327,6 +327,7 @@ void vdo_submit_data_vio(struct data_vio *data_vio)
* @error_handler: the handler for submission or I/O errors (may be NULL)
* @operation: the type of I/O to perform
* @data: the buffer to read or write (may be NULL)
+ * @size: the I/O amount in bytes
*
* The vio is enqueued on a vdo bio queue so that bio submission (which may block) does not block
* other vdo threads.
@@ -338,7 +339,7 @@ void vdo_submit_data_vio(struct data_vio *data_vio)
*/
void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
bio_end_io_t callback, vdo_action_fn error_handler,
- blk_opf_t operation, char *data)
+ blk_opf_t operation, char *data, int size)
{
int result;
struct vdo_completion *completion = &vio->completion;
@@ -349,7 +350,8 @@ void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
vdo_reset_completion(completion);
completion->error_handler = error_handler;
- result = vio_reset_bio(vio, data, callback, operation | REQ_META, physical);
+ result = vio_reset_bio_with_size(vio, data, size, callback, operation | REQ_META,
+ physical);
if (result != VDO_SUCCESS) {
continue_vio(vio, result);
return;
diff --git a/drivers/md/dm-vdo/io-submitter.h b/drivers/md/dm-vdo/io-submitter.h
index 80748699496f..3088f11055fd 100644
--- a/drivers/md/dm-vdo/io-submitter.h
+++ b/drivers/md/dm-vdo/io-submitter.h
@@ -8,6 +8,7 @@
#include <linux/bio.h>
+#include "constants.h"
#include "types.h"
struct io_submitter;
@@ -26,14 +27,25 @@ void vdo_submit_data_vio(struct data_vio *data_vio);
void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
bio_end_io_t callback, vdo_action_fn error_handler,
- blk_opf_t operation, char *data);
+ blk_opf_t operation, char *data, int size);
static inline void vdo_submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
bio_end_io_t callback, vdo_action_fn error_handler,
blk_opf_t operation)
{
__submit_metadata_vio(vio, physical, callback, error_handler,
- operation, vio->data);
+ operation, vio->data, vio->block_count * VDO_BLOCK_SIZE);
+}
+
+static inline void vdo_submit_metadata_vio_with_size(struct vio *vio,
+ physical_block_number_t physical,
+ bio_end_io_t callback,
+ vdo_action_fn error_handler,
+ blk_opf_t operation,
+ int size)
+{
+ __submit_metadata_vio(vio, physical, callback, error_handler,
+ operation, vio->data, size);
}
static inline void vdo_submit_flush_vio(struct vio *vio, bio_end_io_t callback,
@@ -41,7 +53,7 @@ static inline void vdo_submit_flush_vio(struct vio *vio, bio_end_io_t callback,
{
/* FIXME: Can we just use REQ_OP_FLUSH? */
__submit_metadata_vio(vio, 0, callback, error_handler,
- REQ_OP_WRITE | REQ_PREFLUSH, NULL);
+ REQ_OP_WRITE | REQ_PREFLUSH, NULL, 0);
}
#endif /* VDO_IO_SUBMITTER_H */
diff --git a/drivers/md/dm-vdo/packer.h b/drivers/md/dm-vdo/packer.h
index 0f3be44710b5..8c8d6892582d 100644
--- a/drivers/md/dm-vdo/packer.h
+++ b/drivers/md/dm-vdo/packer.h
@@ -46,7 +46,7 @@ struct compressed_block {
/*
* Each packer_bin holds an incomplete batch of data_vios that only partially fill a compressed
- * block. The bins are kept in a ring sorted by the amount of unused space so the first bin with
+ * block. The bins are kept in a list sorted by the amount of unused space so the first bin with
* enough space to hold a newly-compressed data_vio can easily be found. When the bin fills up or
* is flushed, the first uncanceled data_vio in the bin is selected to be the agent for that bin.
* Upon entering the packer, each data_vio already has its compressed data in the first slot of the
diff --git a/drivers/md/dm-vdo/priority-table.c b/drivers/md/dm-vdo/priority-table.c
index 42d3d8d0e4b5..9bae8256ba4e 100644
--- a/drivers/md/dm-vdo/priority-table.c
+++ b/drivers/md/dm-vdo/priority-table.c
@@ -199,7 +199,7 @@ void vdo_priority_table_remove(struct priority_table *table, struct list_head *e
/*
* Remove the entry from the bucket list, remembering a pointer to another entry in the
- * ring.
+ * list.
*/
next_entry = entry->next;
list_del_init(entry);
diff --git a/drivers/md/dm-vdo/recovery-journal.h b/drivers/md/dm-vdo/recovery-journal.h
index 899071173015..25e7ec6d19f6 100644
--- a/drivers/md/dm-vdo/recovery-journal.h
+++ b/drivers/md/dm-vdo/recovery-journal.h
@@ -43,9 +43,9 @@
* has a vio which is used to commit that block to disk. The vio's data is the on-disk
* representation of the journal block. In addition each in-memory block has a buffer which is used
* to accumulate entries while a partial commit of the block is in progress. In-memory blocks are
- * kept on two rings. Free blocks live on the 'free_tail_blocks' ring. When a block becomes active
- * (see below) it is moved to the 'active_tail_blocks' ring. When a block is fully committed, it is
- * moved back to the 'free_tail_blocks' ring.
+ * kept on two lists. Free blocks live on the 'free_tail_blocks' list. When a block becomes active
+ * (see below) it is moved to the 'active_tail_blocks' list. When a block is fully committed, it is
+ * moved back to the 'free_tail_blocks' list.
*
* When entries are added to the journal, they are added to the active in-memory block, as
* indicated by the 'active_block' field. If the caller wishes to wait for the entry to be
diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c
index 8f0a35c63af6..f3d80ff7bef5 100644
--- a/drivers/md/dm-vdo/slab-depot.c
+++ b/drivers/md/dm-vdo/slab-depot.c
@@ -139,7 +139,7 @@ static bool is_slab_journal_blank(const struct vdo_slab *slab)
}
/**
- * mark_slab_journal_dirty() - Put a slab journal on the dirty ring of its allocator in the correct
+ * mark_slab_journal_dirty() - Put a slab journal on the dirty list of its allocator in the correct
* order.
* @journal: The journal to be marked dirty.
* @lock: The recovery journal lock held by the slab journal.
@@ -414,8 +414,7 @@ static void complete_reaping(struct vdo_completion *completion)
{
struct slab_journal *journal = completion->parent;
- return_vio_to_pool(journal->slab->allocator->vio_pool,
- vio_as_pooled_vio(as_vio(vdo_forget(completion))));
+ return_vio_to_pool(vio_as_pooled_vio(as_vio(completion)));
finish_reaping(journal);
reap_slab_journal(journal);
}
@@ -698,7 +697,7 @@ static void complete_write(struct vdo_completion *completion)
sequence_number_t committed = get_committing_sequence_number(pooled);
list_del_init(&pooled->list_entry);
- return_vio_to_pool(journal->slab->allocator->vio_pool, vdo_forget(pooled));
+ return_vio_to_pool(pooled);
if (result != VDO_SUCCESS) {
vio_record_metadata_io_error(as_vio(completion));
@@ -822,7 +821,7 @@ static void commit_tail(struct slab_journal *journal)
/*
* Since we are about to commit the tail block, this journal no longer needs to be on the
- * ring of journals which the recovery journal might ask to commit.
+ * list of journals which the recovery journal might ask to commit.
*/
mark_slab_journal_clean(journal);
@@ -1076,7 +1075,7 @@ static void finish_reference_block_write(struct vdo_completion *completion)
/* Release the slab journal lock. */
adjust_slab_journal_block_reference(&slab->journal,
block->slab_journal_lock_to_release, -1);
- return_vio_to_pool(slab->allocator->vio_pool, pooled);
+ return_vio_to_pool(pooled);
/*
* We can't clear the is_writing flag earlier as releasing the slab journal lock may cause
@@ -1170,8 +1169,8 @@ static void handle_io_error(struct vdo_completion *completion)
struct vdo_slab *slab = ((struct reference_block *) completion->parent)->slab;
vio_record_metadata_io_error(vio);
- return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
- slab->active_count--;
+ return_vio_to_pool(vio_as_pooled_vio(vio));
+ slab->active_count -= vio->io_size / VDO_BLOCK_SIZE;
vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
check_if_slab_drained(slab);
}
@@ -1372,7 +1371,7 @@ static unsigned int calculate_slab_priority(struct vdo_slab *slab)
static void prioritize_slab(struct vdo_slab *slab)
{
VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
- "a slab must not already be on a ring when prioritizing");
+ "a slab must not already be on a list when prioritizing");
slab->priority = calculate_slab_priority(slab);
vdo_priority_table_enqueue(slab->allocator->prioritized_slabs,
slab->priority, &slab->allocq_entry);
@@ -2165,28 +2164,95 @@ static void dirty_all_reference_blocks(struct vdo_slab *slab)
dirty_block(&slab->reference_blocks[i]);
}
+static inline bool journal_points_equal(struct journal_point first,
+ struct journal_point second)
+{
+ return ((first.sequence_number == second.sequence_number) &&
+ (first.entry_count == second.entry_count));
+}
+
/**
- * clear_provisional_references() - Clear the provisional reference counts from a reference block.
- * @block: The block to clear.
+ * match_bytes() - Check an 8-byte word for bytes matching the value specified
+ * @input: A word to examine the bytes of
+ * @match: The byte value sought
+ *
+ * Return: 1 in each byte when the corresponding input byte matched, 0 otherwise
*/
-static void clear_provisional_references(struct reference_block *block)
+static inline u64 match_bytes(u64 input, u8 match)
{
- vdo_refcount_t *counters = get_reference_counters_for_block(block);
- block_count_t j;
+ u64 temp = input ^ (match * 0x0101010101010101ULL);
+ /* top bit of each byte is set iff top bit of temp byte is clear; rest are 0 */
+ u64 test_top_bits = ~temp & 0x8080808080808080ULL;
+ /* top bit of each byte is set iff low 7 bits of temp byte are clear; rest are useless */
+ u64 test_low_bits = 0x8080808080808080ULL - (temp & 0x7f7f7f7f7f7f7f7fULL);
+ /* return 1 when both tests indicate temp byte is 0 */
+ return (test_top_bits & test_low_bits) >> 7;
+}
+
+/**
+ * count_valid_references() - Process a newly loaded refcount array
+ * @counters: the array of counters from a metadata block
+ *
+ * Scan an 8-byte-aligned array of counters, fixing up any "provisional" values that weren't
+ * cleaned up at shutdown, changing them internally to "empty".
+ *
+ * Return: the number of blocks that are referenced (counters not "empty")
+ */
+static unsigned int count_valid_references(vdo_refcount_t *counters)
+{
+ u64 *words = (u64 *)counters;
+ /* It's easier to count occurrences of a specific byte than its absences. */
+ unsigned int empty_count = 0;
+ /* For speed, we process 8 bytes at once. */
+ unsigned int words_left = COUNTS_PER_BLOCK / sizeof(u64);
+
+ /*
+ * Sanity check assumptions used for optimizing this code: Counters are bytes. The counter
+ * array is a multiple of the word size.
+ */
+ BUILD_BUG_ON(sizeof(vdo_refcount_t) != 1);
+ BUILD_BUG_ON((COUNTS_PER_BLOCK % sizeof(u64)) != 0);
+
+ while (words_left > 0) {
+ /*
+ * This is used effectively as 8 byte-size counters. Byte 0 counts how many words
+ * had the target value found in byte 0, etc. We just have to avoid overflow.
+ */
+ u64 split_count = 0;
+ /*
+ * The counter "% 255" trick used below to fold split_count into empty_count
+ * imposes a limit of 254 bytes examined each iteration of the outer loop. We
+ * process a word at a time, so that limit gets rounded down to 31 u64 words.
+ */
+ const unsigned int max_words_per_iteration = 254 / sizeof(u64);
+ unsigned int iter_words_left = min_t(unsigned int, words_left,
+ max_words_per_iteration);
+
+ words_left -= iter_words_left;
+
+ while (iter_words_left--) {
+ u64 word = *words;
+ u64 temp;
+
+ /* First, if we have any provisional refcount values, clear them. */
+ temp = match_bytes(word, PROVISIONAL_REFERENCE_COUNT);
+ if (temp) {
+ /*
+ * 'temp' has 0x01 bytes where 'word' has PROVISIONAL; this xor
+ * will alter just those bytes, changing PROVISIONAL to EMPTY.
+ */
+ word ^= temp * (PROVISIONAL_REFERENCE_COUNT ^ EMPTY_REFERENCE_COUNT);
+ *words = word;
+ }
- for (j = 0; j < COUNTS_PER_BLOCK; j++) {
- if (counters[j] == PROVISIONAL_REFERENCE_COUNT) {
- counters[j] = EMPTY_REFERENCE_COUNT;
- block->allocated_count--;
+ /* Now count the EMPTY_REFERENCE_COUNT bytes, updating the 8 counters. */
+ split_count += match_bytes(word, EMPTY_REFERENCE_COUNT);
+ words++;
}
+ empty_count += split_count % 255;
}
-}
-static inline bool journal_points_equal(struct journal_point first,
- struct journal_point second)
-{
- return ((first.sequence_number == second.sequence_number) &&
- (first.entry_count == second.entry_count));
+ return COUNTS_PER_BLOCK - empty_count;
}
/**
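match_bytes() and count_valid_references() above are a SWAR (SIMD-within-a-register) scan: each 8-byte word is tested for a target byte value without per-byte branches, the 0/1 results accumulate in eight byte-wide lanes of split_count, and "% 255" folds those lanes into a single total before any lane can overflow (256 ≡ 1 mod 255). A self-contained user-space copy of the arithmetic that can be used to sanity-check the bit tricks:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Return a word with 0x01 in every byte of 'input' equal to 'match', 0 elsewhere. */
static uint64_t match_bytes(uint64_t input, uint8_t match)
{
    uint64_t temp = input ^ (match * 0x0101010101010101ULL);
    uint64_t top = ~temp & 0x8080808080808080ULL;                           /* top bit of byte clear? */
    uint64_t low = 0x8080808080808080ULL - (temp & 0x7f7f7f7f7f7f7f7fULL);  /* low 7 bits clear? */

    return (top & low) >> 7;    /* both tests pass only where the byte was zero */
}

/* Count occurrences of 'match' in an 8-byte-aligned buffer, at most 31 words
 * (248 bytes) per fold so the byte-wide lane counters cannot overflow. */
static unsigned int count_matches(const uint64_t *words, unsigned int nwords, uint8_t match)
{
    unsigned int total = 0;

    while (nwords > 0) {
        unsigned int chunk = nwords < 31 ? nwords : 31;
        uint64_t split = 0;

        nwords -= chunk;
        while (chunk--)
            split += match_bytes(*words++, match);
        total += split % 255;   /* folds the 8 lane counters into one sum */
    }
    return total;
}

int main(void)
{
    uint8_t buf[64];
    uint64_t words[8];

    memset(buf, 0xAA, sizeof(buf));
    buf[3] = buf[17] = buf[40] = 0x55;
    memcpy(words, buf, sizeof(buf));

    assert(count_matches(words, 8, 0x55) == 3);
    assert(count_matches(words, 8, 0xAA) == 61);
    return 0;
}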
@@ -2197,7 +2263,6 @@ static inline bool journal_points_equal(struct journal_point first,
static void unpack_reference_block(struct packed_reference_block *packed,
struct reference_block *block)
{
- block_count_t index;
sector_count_t i;
struct vdo_slab *slab = block->slab;
vdo_refcount_t *counters = get_reference_counters_for_block(block);
@@ -2223,11 +2288,7 @@ static void unpack_reference_block(struct packed_reference_block *packed,
}
}
- block->allocated_count = 0;
- for (index = 0; index < COUNTS_PER_BLOCK; index++) {
- if (counters[index] != EMPTY_REFERENCE_COUNT)
- block->allocated_count++;
- }
+ block->allocated_count = count_valid_references(counters);
}
/**
@@ -2240,13 +2301,19 @@ static void finish_reference_block_load(struct vdo_completion *completion)
struct pooled_vio *pooled = vio_as_pooled_vio(vio);
struct reference_block *block = completion->parent;
struct vdo_slab *slab = block->slab;
+ unsigned int block_count = vio->io_size / VDO_BLOCK_SIZE;
+ unsigned int i;
+ char *data = vio->data;
- unpack_reference_block((struct packed_reference_block *) vio->data, block);
- return_vio_to_pool(slab->allocator->vio_pool, pooled);
- slab->active_count--;
- clear_provisional_references(block);
+ for (i = 0; i < block_count; i++, block++, data += VDO_BLOCK_SIZE) {
+ struct packed_reference_block *packed = (struct packed_reference_block *) data;
+
+ unpack_reference_block(packed, block);
+ slab->free_blocks -= block->allocated_count;
+ }
+ return_vio_to_pool(pooled);
+ slab->active_count -= block_count;
- slab->free_blocks -= block->allocated_count;
check_if_slab_drained(slab);
}
@@ -2260,23 +2327,25 @@ static void load_reference_block_endio(struct bio *bio)
}
/**
- * load_reference_block() - After a block waiter has gotten a VIO from the VIO pool, load the
- * block.
- * @waiter: The waiter of the block to load.
+ * load_reference_block_group() - After a block waiter has gotten a VIO from the VIO pool, load
+ * a set of blocks.
+ * @waiter: The waiter of the first block to load.
* @context: The VIO returned by the pool.
*/
-static void load_reference_block(struct vdo_waiter *waiter, void *context)
+static void load_reference_block_group(struct vdo_waiter *waiter, void *context)
{
struct pooled_vio *pooled = context;
struct vio *vio = &pooled->vio;
struct reference_block *block =
container_of(waiter, struct reference_block, waiter);
- size_t block_offset = (block - block->slab->reference_blocks);
+ u32 block_offset = block - block->slab->reference_blocks;
+ u32 max_block_count = block->slab->reference_block_count - block_offset;
+ u32 block_count = min_t(int, vio->block_count, max_block_count);
vio->completion.parent = block;
- vdo_submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset,
- load_reference_block_endio, handle_io_error,
- REQ_OP_READ);
+ vdo_submit_metadata_vio_with_size(vio, block->slab->ref_counts_origin + block_offset,
+ load_reference_block_endio, handle_io_error,
+ REQ_OP_READ, block_count * VDO_BLOCK_SIZE);
}
/**
@@ -2286,14 +2355,21 @@ static void load_reference_block(struct vdo_waiter *waiter, void *context)
static void load_reference_blocks(struct vdo_slab *slab)
{
block_count_t i;
+ u64 blocks_per_vio = slab->allocator->refcount_blocks_per_big_vio;
+ struct vio_pool *pool = slab->allocator->refcount_big_vio_pool;
+
+ if (!pool) {
+ pool = slab->allocator->vio_pool;
+ blocks_per_vio = 1;
+ }
slab->free_blocks = slab->block_count;
slab->active_count = slab->reference_block_count;
- for (i = 0; i < slab->reference_block_count; i++) {
+ for (i = 0; i < slab->reference_block_count; i += blocks_per_vio) {
struct vdo_waiter *waiter = &slab->reference_blocks[i].waiter;
- waiter->callback = load_reference_block;
- acquire_vio_from_pool(slab->allocator->vio_pool, waiter);
+ waiter->callback = load_reference_block_group;
+ acquire_vio_from_pool(pool, waiter);
}
}
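load_reference_blocks() now advances in strides of refcount_blocks_per_big_vio, and each submitted read covers min(stride, blocks remaining); when no big-vio pool was created, the stride falls back to 1. (The stride itself is computed later in initialize_block_allocator() with two DIV_ROUND_UPs so the reads come out evenly sized.) The batching arithmetic in isolation, with a hypothetical submit callback standing in for the VDO I/O path:

#include <stdint.h>

/* Hypothetical I/O hook: read 'count' metadata blocks starting at 'first'. */
typedef void (*submit_fn)(void *ctx, uint32_t first, uint32_t count);

/* Issue reads in groups of at most 'per_io' blocks; the last group may be shorter. */
static void load_in_batches(void *ctx, uint32_t total, uint32_t per_io, submit_fn submit)
{
    if (per_io == 0)
        per_io = 1;     /* no big-vio pool: one block per read */

    for (uint32_t i = 0; i < total; i += per_io) {
        uint32_t remaining = total - i;
        uint32_t count = remaining < per_io ? remaining : per_io;

        submit(ctx, i, count);
    }
}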
@@ -2429,7 +2505,7 @@ static void finish_loading_journal(struct vdo_completion *completion)
initialize_journal_state(journal);
}
- return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+ return_vio_to_pool(vio_as_pooled_vio(vio));
vdo_finish_loading_with_result(&slab->state, allocate_counters_if_clean(slab));
}
@@ -2449,7 +2525,7 @@ static void handle_load_error(struct vdo_completion *completion)
struct vio *vio = as_vio(completion);
vio_record_metadata_io_error(vio);
- return_vio_to_pool(journal->slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+ return_vio_to_pool(vio_as_pooled_vio(vio));
vdo_finish_loading_with_result(&journal->slab->state, result);
}
@@ -2547,7 +2623,7 @@ static void queue_slab(struct vdo_slab *slab)
int result;
VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
- "a requeued slab must not already be on a ring");
+ "a requeued slab must not already be on a list");
if (vdo_is_read_only(allocator->depot->vdo))
return;
@@ -2700,6 +2776,7 @@ static void finish_scrubbing(struct slab_scrubber *scrubber, int result)
vdo_log_info("VDO commencing normal operation");
else if (prior_state == VDO_RECOVERING)
vdo_log_info("Exiting recovery mode");
+ free_vio_pool(vdo_forget(allocator->refcount_big_vio_pool));
}
/*
@@ -3281,7 +3358,7 @@ int vdo_release_block_reference(struct block_allocator *allocator,
* This is a min_heap callback function orders slab_status structures using the 'is_clean' field as
* the primary key and the 'emptiness' field as the secondary key.
*
- * Slabs need to be pushed onto the rings in the same order they are to be popped off. Popping
+ * Slabs need to be pushed onto the lists in the same order they are to be popped off. Popping
* should always get the most empty first, so pushing should be from most empty to least empty.
* Thus, the ordering is reversed from the usual sense since min_heap returns smaller elements
* before larger ones.
@@ -3983,6 +4060,7 @@ static int __must_check initialize_block_allocator(struct slab_depot *depot,
struct vdo *vdo = depot->vdo;
block_count_t max_free_blocks = depot->slab_config.data_blocks;
unsigned int max_priority = (2 + ilog2(max_free_blocks));
+ u32 reference_block_count, refcount_reads_needed, refcount_blocks_per_vio;
*allocator = (struct block_allocator) {
.depot = depot,
@@ -4000,12 +4078,24 @@ static int __must_check initialize_block_allocator(struct slab_depot *depot,
return result;
vdo_initialize_completion(&allocator->completion, vdo, VDO_BLOCK_ALLOCATOR_COMPLETION);
- result = make_vio_pool(vdo, BLOCK_ALLOCATOR_VIO_POOL_SIZE, allocator->thread_id,
+ result = make_vio_pool(vdo, BLOCK_ALLOCATOR_VIO_POOL_SIZE, 1, allocator->thread_id,
VIO_TYPE_SLAB_JOURNAL, VIO_PRIORITY_METADATA,
allocator, &allocator->vio_pool);
if (result != VDO_SUCCESS)
return result;
+ /* Initialize the refcount-reading vio pool. */
+ reference_block_count = vdo_get_saved_reference_count_size(depot->slab_config.slab_blocks);
+ refcount_reads_needed = DIV_ROUND_UP(reference_block_count, MAX_BLOCKS_PER_VIO);
+ refcount_blocks_per_vio = DIV_ROUND_UP(reference_block_count, refcount_reads_needed);
+ allocator->refcount_blocks_per_big_vio = refcount_blocks_per_vio;
+ result = make_vio_pool(vdo, BLOCK_ALLOCATOR_REFCOUNT_VIO_POOL_SIZE,
+ allocator->refcount_blocks_per_big_vio, allocator->thread_id,
+ VIO_TYPE_SLAB_JOURNAL, VIO_PRIORITY_METADATA,
+ NULL, &allocator->refcount_big_vio_pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
result = initialize_slab_scrubber(allocator);
if (result != VDO_SUCCESS)
return result;
@@ -4223,6 +4313,7 @@ void vdo_free_slab_depot(struct slab_depot *depot)
uninitialize_allocator_summary(allocator);
uninitialize_scrubber_vio(&allocator->scrubber);
free_vio_pool(vdo_forget(allocator->vio_pool));
+ free_vio_pool(vdo_forget(allocator->refcount_big_vio_pool));
vdo_free_priority_table(vdo_forget(allocator->prioritized_slabs));
}
diff --git a/drivers/md/dm-vdo/slab-depot.h b/drivers/md/dm-vdo/slab-depot.h
index f234853501ca..fadc0c9d4dc4 100644
--- a/drivers/md/dm-vdo/slab-depot.h
+++ b/drivers/md/dm-vdo/slab-depot.h
@@ -45,6 +45,13 @@
enum {
/* The number of vios in the vio pool is proportional to the throughput of the VDO. */
BLOCK_ALLOCATOR_VIO_POOL_SIZE = 128,
+
+ /*
+ * The number of vios in the vio pool used for loading reference count data. A slab's
+ * refcount data is capped at ~8MB, and we process one slab at a time in a zone, so 9 should be
+ * plenty.
+ */
+ BLOCK_ALLOCATOR_REFCOUNT_VIO_POOL_SIZE = 9,
};
/*
@@ -248,7 +255,7 @@ struct vdo_slab {
/* A list of the dirty blocks waiting to be written out */
struct vdo_wait_queue dirty_blocks;
- /* The number of blocks which are currently writing */
+ /* The number of blocks currently being read or written */
size_t active_count;
/* A waiter object for updating the slab summary */
@@ -425,6 +432,10 @@ struct block_allocator {
/* The vio pool for reading and writing block allocator metadata */
struct vio_pool *vio_pool;
+ /* The vio pool for large initial reads of ref count areas */
+ struct vio_pool *refcount_big_vio_pool;
+ /* How many ref count blocks are read per vio at initial load */
+ u32 refcount_blocks_per_big_vio;
/* The dm_kcopyd client for erasing slab journals */
struct dm_kcopyd_client *eraser;
/* Iterator over the slabs to be erased */
diff --git a/drivers/md/dm-vdo/types.h b/drivers/md/dm-vdo/types.h
index dbe892b10f26..cdf36e7d7702 100644
--- a/drivers/md/dm-vdo/types.h
+++ b/drivers/md/dm-vdo/types.h
@@ -376,6 +376,9 @@ struct vio {
/* The size of this vio in blocks */
unsigned int block_count;
+ /* The amount of data to be read or written, in bytes */
+ unsigned int io_size;
+
/* The data being read or written. */
char *data;
diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c
index a7e32baab4af..80b608674022 100644
--- a/drivers/md/dm-vdo/vdo.c
+++ b/drivers/md/dm-vdo/vdo.c
@@ -31,9 +31,7 @@
#include <linux/completion.h>
#include <linux/device-mapper.h>
-#include <linux/kernel.h>
#include <linux/lz4.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -142,12 +140,6 @@ static void finish_vdo_request_queue(void *ptr)
vdo_unregister_allocating_thread();
}
-#ifdef MODULE
-#define MODULE_NAME THIS_MODULE->name
-#else
-#define MODULE_NAME "dm-vdo"
-#endif /* MODULE */
-
static const struct vdo_work_queue_type default_queue_type = {
.start = start_vdo_request_queue,
.finish = finish_vdo_request_queue,
@@ -559,8 +551,7 @@ int vdo_make(unsigned int instance, struct device_config *config, char **reason,
*vdo_ptr = vdo;
snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix),
- "%s%u", MODULE_NAME, instance);
- BUG_ON(vdo->thread_name_prefix[0] == '\0');
+ "vdo%u", instance);
result = vdo_allocate(vdo->thread_config.thread_count,
struct vdo_thread, __func__, &vdo->threads);
if (result != VDO_SUCCESS) {
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c
index e710f3c5a972..e7f4153e55e3 100644
--- a/drivers/md/dm-vdo/vio.c
+++ b/drivers/md/dm-vdo/vio.c
@@ -188,14 +188,23 @@ void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callb
/*
* Prepares the bio to perform IO with the specified buffer. May only be used on a VDO-allocated
- * bio, as it assumes the bio wraps a 4k buffer that is 4k aligned, but there does not have to be a
- * vio associated with the bio.
+ * bio, as it assumes the bio wraps a 4k-multiple buffer that is 4k aligned, but there does not
+ * have to be a vio associated with the bio.
*/
int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
blk_opf_t bi_opf, physical_block_number_t pbn)
{
- int bvec_count, offset, len, i;
+ return vio_reset_bio_with_size(vio, data, vio->block_count * VDO_BLOCK_SIZE,
+ callback, bi_opf, pbn);
+}
+
+int vio_reset_bio_with_size(struct vio *vio, char *data, int size, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn)
+{
+ int bvec_count, offset, i;
struct bio *bio = vio->bio;
+ int vio_size = vio->block_count * VDO_BLOCK_SIZE;
+ int remaining;
bio_reset(bio, bio->bi_bdev, bi_opf);
vdo_set_bio_properties(bio, vio, callback, bi_opf, pbn);
@@ -205,22 +214,21 @@ int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
bio->bi_ioprio = 0;
bio->bi_io_vec = bio->bi_inline_vecs;
bio->bi_max_vecs = vio->block_count + 1;
- len = VDO_BLOCK_SIZE * vio->block_count;
+ if (VDO_ASSERT(size <= vio_size, "specified size %d is not greater than allocated %d",
+ size, vio_size) != VDO_SUCCESS)
+ size = vio_size;
+ vio->io_size = size;
offset = offset_in_page(data);
- bvec_count = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ bvec_count = DIV_ROUND_UP(offset + size, PAGE_SIZE);
+ remaining = size;
- /*
- * If we knew that data was always on one page, or contiguous pages, we wouldn't need the
- * loop. But if we're using vmalloc, it's not impossible that the data is in different
- * pages that can't be merged in bio_add_page...
- */
- for (i = 0; (i < bvec_count) && (len > 0); i++) {
+ for (i = 0; (i < bvec_count) && (remaining > 0); i++) {
struct page *page;
int bytes_added;
int bytes = PAGE_SIZE - offset;
- if (bytes > len)
- bytes = len;
+ if (bytes > remaining)
+ bytes = remaining;
page = is_vmalloc_addr(data) ? vmalloc_to_page(data) : virt_to_page(data);
bytes_added = bio_add_page(bio, page, bytes, offset);
@@ -232,7 +240,7 @@ int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
}
data += bytes;
- len -= bytes;
+ remaining -= bytes;
offset = 0;
}
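vio_reset_bio_with_size() adds the buffer to the bio one page-sized piece at a time because a vmalloc'ed buffer need not be physically contiguous: only the first segment may start mid-page, and the last one carries the tail. The chunking arithmetic can be sketched independently of the bio API:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Print the (offset, bytes) pairs the bvec-building loop would generate for a
 * buffer starting 'offset' bytes into its first page. */
static void walk_segments(unsigned int offset, unsigned int size)
{
    unsigned int remaining = size;

    while (remaining > 0) {
        unsigned int bytes = PAGE_SIZE - offset;

        if (bytes > remaining)
            bytes = remaining;
        printf("segment: offset %u, %u bytes\n", offset, bytes);
        remaining -= bytes;
        offset = 0;     /* only the first segment can start mid-page */
    }
}

int main(void)
{
    walk_segments(512, 12288);  /* 3584 + 4096 + 4096 + 512 */
    return 0;
}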
@@ -301,6 +309,7 @@ void vio_record_metadata_io_error(struct vio *vio)
* make_vio_pool() - Create a new vio pool.
* @vdo: The vdo.
* @pool_size: The number of vios in the pool.
+ * @block_count: The number of 4k blocks per vio.
* @thread_id: The ID of the thread using this pool.
* @vio_type: The type of vios in the pool.
* @priority: The priority with which vios from the pool should be enqueued.
@@ -309,13 +318,14 @@ void vio_record_metadata_io_error(struct vio *vio)
*
* Return: A success or error code.
*/
-int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
+int make_vio_pool(struct vdo *vdo, size_t pool_size, size_t block_count, thread_id_t thread_id,
enum vio_type vio_type, enum vio_priority priority, void *context,
struct vio_pool **pool_ptr)
{
struct vio_pool *pool;
char *ptr;
int result;
+ size_t per_vio_size = VDO_BLOCK_SIZE * block_count;
result = vdo_allocate_extended(struct vio_pool, pool_size, struct pooled_vio,
__func__, &pool);
@@ -326,7 +336,7 @@ int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
INIT_LIST_HEAD(&pool->available);
INIT_LIST_HEAD(&pool->busy);
- result = vdo_allocate(pool_size * VDO_BLOCK_SIZE, char,
+ result = vdo_allocate(pool_size * per_vio_size, char,
"VIO pool buffer", &pool->buffer);
if (result != VDO_SUCCESS) {
free_vio_pool(pool);
@@ -334,10 +344,10 @@ int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
}
ptr = pool->buffer;
- for (pool->size = 0; pool->size < pool_size; pool->size++, ptr += VDO_BLOCK_SIZE) {
+ for (pool->size = 0; pool->size < pool_size; pool->size++, ptr += per_vio_size) {
struct pooled_vio *pooled = &pool->vios[pool->size];
- result = allocate_vio_components(vdo, vio_type, priority, NULL, 1, ptr,
+ result = allocate_vio_components(vdo, vio_type, priority, NULL, block_count, ptr,
&pooled->vio);
if (result != VDO_SUCCESS) {
free_vio_pool(pool);
@@ -345,6 +355,7 @@ int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
}
pooled->context = context;
+ pooled->pool = pool;
list_add_tail(&pooled->pool_entry, &pool->available);
}
@@ -419,12 +430,13 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
}
/**
- * return_vio_to_pool() - Return a vio to the pool
- * @pool: The vio pool.
+ * return_vio_to_pool() - Return a vio to its pool
* @vio: The pooled vio to return.
*/
-void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio)
+void return_vio_to_pool(struct pooled_vio *vio)
{
+ struct vio_pool *pool = vio->pool;
+
VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
"vio pool entry returned on same thread as it was acquired");
diff --git a/drivers/md/dm-vdo/vio.h b/drivers/md/dm-vdo/vio.h
index 3490e9f59b04..4bfcb21901f1 100644
--- a/drivers/md/dm-vdo/vio.h
+++ b/drivers/md/dm-vdo/vio.h
@@ -30,6 +30,8 @@ struct pooled_vio {
void *context;
/* The list entry used by the pool */
struct list_head pool_entry;
+ /* The pool this vio is allocated from */
+ struct vio_pool *pool;
};
/**
@@ -123,6 +125,8 @@ void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callb
int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
blk_opf_t bi_opf, physical_block_number_t pbn);
+int vio_reset_bio_with_size(struct vio *vio, char *data, int size, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn);
void update_vio_error_stats(struct vio *vio, const char *format, ...)
__printf(2, 3);
@@ -188,12 +192,13 @@ static inline struct pooled_vio *vio_as_pooled_vio(struct vio *vio)
struct vio_pool;
-int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
- enum vio_type vio_type, enum vio_priority priority,
- void *context, struct vio_pool **pool_ptr);
+int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, size_t block_count,
+ thread_id_t thread_id, enum vio_type vio_type,
+ enum vio_priority priority, void *context,
+ struct vio_pool **pool_ptr);
void free_vio_pool(struct vio_pool *pool);
bool __must_check is_vio_pool_busy(struct vio_pool *pool);
void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter);
-void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio);
+void return_vio_to_pool(struct pooled_vio *vio);
#endif /* VIO_H */
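
The vio.h change above drops the pool argument from return_vio_to_pool(): each pooled_vio now carries a back-pointer to its owning vio_pool, set when the pool is built. A small, hypothetical stand-alone C model of that back-pointer pattern (names are illustrative, not the kernel's):

/* Hypothetical model of the back-pointer pattern introduced above: each
 * pooled object remembers its owning pool, so callers no longer pass it. */
#include <stdio.h>

struct pool;

struct pooled_obj {
	struct pool *pool;	/* set once, when the pool is built */
	int id;
};

struct pool {
	const char *name;
	int available;
};

static void return_to_pool(struct pooled_obj *obj)
{
	struct pool *pool = obj->pool;	/* recovered from the object itself */

	pool->available++;
	printf("returned obj %d to pool %s (%d available)\n",
	       obj->id, pool->name, pool->available);
}

int main(void)
{
	struct pool p = { .name = "vio-like", .available = 0 };
	struct pooled_obj o = { .pool = &p, .id = 7 };

	return_to_pool(&o);	/* no separate pool argument needed */
	return 0;
}
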
diff --git a/drivers/md/dm-vdo/wait-queue.c b/drivers/md/dm-vdo/wait-queue.c
index 6e1e739277ef..f81ed0cee2bf 100644
--- a/drivers/md/dm-vdo/wait-queue.c
+++ b/drivers/md/dm-vdo/wait-queue.c
@@ -34,7 +34,7 @@ void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq, struct vdo_waiter *w
waitq->last_waiter->next_waiter = waiter;
}
- /* In both cases, the waiter we added to the ring becomes the last waiter. */
+ /* In both cases, the waiter we added to the list becomes the last waiter. */
waitq->last_waiter = waiter;
waitq->length += 1;
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index e86c1431b108..3c427f18a04b 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -30,6 +30,7 @@
#define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR"
#define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
+#define DM_VERITY_USE_BH_DEFAULT_BYTES 8192
#define DM_VERITY_MAX_CORRUPTED_ERRS 100
@@ -49,6 +50,15 @@ static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);
+static unsigned int dm_verity_use_bh_bytes[4] = {
+ DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_NONE
+ DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_RT
+ DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_BE
+ 0 // IOPRIO_CLASS_IDLE
+};
+
+module_param_array_named(use_bh_bytes, dm_verity_use_bh_bytes, uint, NULL, 0644);
+
static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);
/* Is at least one dm-verity instance using ahash_tfm instead of shash_tfm? */
@@ -311,7 +321,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
data = dm_bufio_get(v->bufio, hash_block, &buf);
- if (data == NULL) {
+ if (IS_ERR_OR_NULL(data)) {
/*
* In tasklet and the hash was not in the bufio cache.
* Return early and resume execution from a work-queue
@@ -324,8 +334,24 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
&buf, bio->bi_ioprio);
}
- if (IS_ERR(data))
- return PTR_ERR(data);
+ if (IS_ERR(data)) {
+ if (skip_unverified)
+ return 1;
+ r = PTR_ERR(data);
+ data = dm_bufio_new(v->bufio, hash_block, &buf);
+ if (IS_ERR(data))
+ return r;
+ if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
+ hash_block, data) == 0) {
+ aux = dm_bufio_get_aux_data(buf);
+ aux->hash_verified = 1;
+ goto release_ok;
+ } else {
+ dm_bufio_release(buf);
+ dm_bufio_forget(v->bufio, hash_block);
+ return r;
+ }
+ }
aux = dm_bufio_get_aux_data(buf);
@@ -366,6 +392,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
}
}
+release_ok:
data += offset;
memcpy(want_digest, data, v->digest_size);
r = 0;
@@ -652,9 +679,17 @@ static void verity_bh_work(struct work_struct *w)
verity_finish_io(io, errno_to_blk_status(err));
}
+static inline bool verity_use_bh(unsigned int bytes, unsigned short ioprio)
+{
+ return ioprio <= IOPRIO_CLASS_IDLE &&
+ bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]);
+}
+
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
+ unsigned short ioprio = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
+ unsigned int bytes = io->n_blocks << io->v->data_dev_block_bits;
if (bio->bi_status &&
(!verity_fec_is_enabled(io->v) ||
@@ -664,9 +699,14 @@ static void verity_end_io(struct bio *bio)
return;
}
- if (static_branch_unlikely(&use_bh_wq_enabled) && io->v->use_bh_wq) {
- INIT_WORK(&io->bh_work, verity_bh_work);
- queue_work(system_bh_wq, &io->bh_work);
+ if (static_branch_unlikely(&use_bh_wq_enabled) && io->v->use_bh_wq &&
+ verity_use_bh(bytes, ioprio)) {
+ if (in_hardirq() || irqs_disabled()) {
+ INIT_WORK(&io->bh_work, verity_bh_work);
+ queue_work(system_bh_wq, &io->bh_work);
+ } else {
+ verity_bh_work(&io->bh_work);
+ }
} else {
INIT_WORK(&io->work, verity_work);
queue_work(io->v->verify_wq, &io->work);
@@ -796,6 +836,13 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+static void verity_postsuspend(struct dm_target *ti)
+{
+ struct dm_verity *v = ti->private;
+ flush_workqueue(v->verify_wq);
+ dm_bufio_client_reset(v->bufio);
+}
+
/*
* Status: V (valid) or C (corruption found)
*/
@@ -1761,11 +1808,12 @@ static struct target_type verity_target = {
.name = "verity",
/* Note: the LSMs depend on the singleton and immutable features */
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
.map = verity_map,
+ .postsuspend = verity_postsuspend,
.status = verity_status,
.prepare_ioctl = verity_prepare_ioctl,
.iterate_devices = verity_iterate_devices,
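
The dm-verity changes above add a per-I/O-priority-class byte threshold (use_bh_bytes) that decides whether completion runs in the bottom-half path or is deferred to the verify workqueue: the bottom half is used only when the request is small enough for its class, with IOPRIO_CLASS_IDLE defaulting to 0 (never). A user-space model of that gate, with constants mirroring the defaults shown above (not the kernel code itself):

/* Hypothetical user-space model of the verity_use_bh() gate above: the
 * bottom-half path is taken only when the request is small enough for
 * its I/O priority class. */
#include <stdbool.h>
#include <stdio.h>

enum { CLASS_NONE, CLASS_RT, CLASS_BE, CLASS_IDLE, CLASS_COUNT };

/* Mirrors the defaults in the hunk above: 8 KiB for NONE/RT/BE, 0 for IDLE. */
static const unsigned int use_bh_bytes[CLASS_COUNT] = { 8192, 8192, 8192, 0 };

static bool use_bh(unsigned int bytes, unsigned int ioprio_class)
{
	return ioprio_class < CLASS_COUNT &&
	       bytes <= use_bh_bytes[ioprio_class];
}

int main(void)
{
	printf("4 KiB BE read   -> bh? %d\n", use_bh(4096, CLASS_BE));	/* 1 */
	printf("64 KiB BE read  -> bh? %d\n", use_bh(65536, CLASS_BE));	/* 0 */
	printf("4 KiB idle read -> bh? %d\n", use_bh(4096, CLASS_IDLE));	/* 0 */
	return 0;
}
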
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4d1e42891d24..5ab7574c0c76 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1540,14 +1540,18 @@ static void __send_empty_flush(struct clone_info *ci)
{
struct dm_table *t = ci->map;
struct bio flush_bio;
+ blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+
+ if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) ==
+ (REQ_IDLE | REQ_SYNC))
+ opf |= REQ_IDLE;
/*
* Use an on-stack bio for this, it's safe since we don't
* need to reference it after submit. It's just used as
* the basis for the clone(s).
*/
- bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
- REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
+ bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);
ci->bio = &flush_bio;
ci->sector_count = 0;
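
The dm.c hunk above builds the empty-flush opf from REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC and forwards REQ_IDLE only when the original bio carried both REQ_IDLE and REQ_SYNC. A stand-alone sketch of that conditional flag propagation (the flag values are illustrative, not the block layer's):

/* Hypothetical model of the conditional flag propagation above: the idle
 * hint is forwarded to the flush only if the original bio had IDLE and
 * SYNC set. Flag values are made up for the example. */
#include <stdio.h>

#define F_SYNC		(1u << 0)
#define F_IDLE		(1u << 1)
#define F_PREFLUSH	(1u << 2)
#define F_WRITE		(1u << 3)

static unsigned int flush_opf(unsigned int orig_opf)
{
	unsigned int opf = F_WRITE | F_PREFLUSH | F_SYNC;

	if ((orig_opf & (F_IDLE | F_SYNC)) == (F_IDLE | F_SYNC))
		opf |= F_IDLE;
	return opf;
}

int main(void)
{
	printf("sync+idle -> %#x\n", flush_opf(F_SYNC | F_IDLE));	/* keeps F_IDLE */
	printf("sync only -> %#x\n", flush_opf(F_SYNC));		/* drops F_IDLE */
	return 0;
}
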
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 2f5165918163..cfe59c3255f7 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -2701,8 +2701,11 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz)
u8 ratio;
if (state->revision == 0x8090) {
+ u32 internal = dib8000_read32(state, 23) / 1000;
+
ratio = 4;
- unit_khz_dds_val = (1<<26) / (dib8000_read32(state, 23) / 1000);
+
+ unit_khz_dds_val = (1<<26) / (internal ?: 1);
if (offset_khz < 0)
dds = (1 << 26) - (abs_offset_khz * unit_khz_dds_val);
else
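
The dib8000 hunk above reads the internal clock once and divides by (internal ?: 1), so a zero readback cannot cause a division by zero; "x ?: y" is the GNU C conditional with an omitted middle operand that yields x unless x is zero. A hypothetical stand-alone example of the same guard (builds with gcc or clang):

/* Hypothetical stand-alone example of the GNU C "x ?: y" form used above:
 * divide 2^26 by the readback, substituting 1 when the readback is zero,
 * which is how the hunk avoids dividing by a zero clock value. */
#include <stdio.h>

static unsigned int dds_unit(unsigned int internal_khz)
{
	return (1u << 26) / (internal_khz ?: 1);
}

int main(void)
{
	printf("internal=0     -> %u\n", dds_unit(0));		/* no division by zero */
	printf("internal=40000 -> %u\n", dds_unit(40000));	/* 67108864 / 40000 = 1677 */
	return 0;
}
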
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 56bc72c7ce4a..6b37d61150ee 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -641,7 +641,6 @@ source "drivers/misc/mei/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
-source "drivers/misc/cxl/Kconfig"
source "drivers/misc/ocxl/Kconfig"
source "drivers/misc/bcm-vk/Kconfig"
source "drivers/misc/cardreader/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 545aad06d088..d6c917229c45 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -50,7 +50,6 @@ obj-$(CONFIG_SRAM) += sram.o
obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
-obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_DW_XDATA_PCIE) += dw-xdata-pcie.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
deleted file mode 100644
index 15307f5e4307..000000000000
--- a/drivers/misc/cxl/Kconfig
+++ /dev/null
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# IBM Coherent Accelerator (CXL) compatible devices
-#
-
-config CXL_BASE
- bool
- select PPC_COPRO_BASE
- select PPC_64S_HASH_MMU
-
-config CXL
- tristate "Support for IBM Coherent Accelerators (CXL) (DEPRECATED)"
- depends on PPC_POWERNV && PCI_MSI && EEH
- select CXL_BASE
- help
- The cxl driver is deprecated and will be removed in a future
- kernel release.
-
- Select this option to enable driver support for IBM Coherent
- Accelerators (CXL). CXL is otherwise known as Coherent Accelerator
- Processor Interface (CAPI). CAPI allows accelerators in FPGAs to be
- coherently attached to a CPU via an MMU. This driver enables
- userspace programs to access these accelerators via /dev/cxl/afuM.N
- devices.
-
- CAPI adapters are found in POWER8 based systems.
-
- If unsure, say N.
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
deleted file mode 100644
index 5eea61b9584f..000000000000
--- a/drivers/misc/cxl/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-ccflags-y := $(call cc-disable-warning, unused-const-variable)
-ccflags-$(CONFIG_PPC_WERROR) += -Werror
-
-cxl-y += main.o file.o irq.o fault.o native.o
-cxl-y += context.o sysfs.o pci.o trace.o
-cxl-y += vphb.o api.o cxllib.o
-cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o
-cxl-$(CONFIG_DEBUG_FS) += debugfs.o
-obj-$(CONFIG_CXL) += cxl.o
-obj-$(CONFIG_CXL_BASE) += base.o
-
-# For tracepoints to include our trace.h from tracepoint infrastructure:
-CFLAGS_trace.o := -I$(src)
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
deleted file mode 100644
index d85c56530863..000000000000
--- a/drivers/misc/cxl/api.c
+++ /dev/null
@@ -1,532 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/file.h>
-#include <misc/cxl.h>
-#include <linux/module.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
-#include <linux/sched/mm.h>
-#include <linux/mmu_context.h>
-#include <linux/irqdomain.h>
-
-#include "cxl.h"
-
-/*
- * Since we want to track memory mappings to be able to force-unmap
- * when the AFU is no longer reachable, we need an inode. For devices
- * opened through the cxl user API, this is not a problem, but a
- * userland process can also get a cxl fd through the cxl_get_fd()
- * API, which is used by the cxlflash driver.
- *
- * Therefore we implement our own simple pseudo-filesystem and inode
- * allocator. We don't use the anonymous inode, as we need the
- * meta-data associated with it (address_space) and it is shared by
- * other drivers/processes, so it could lead to cxl unmapping VMAs
- * from random processes.
- */
-
-#define CXL_PSEUDO_FS_MAGIC 0x1697697f
-
-static int cxl_fs_cnt;
-static struct vfsmount *cxl_vfs_mount;
-
-static int cxl_fs_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, CXL_PSEUDO_FS_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type cxl_fs_type = {
- .name = "cxl",
- .owner = THIS_MODULE,
- .init_fs_context = cxl_fs_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
-
-void cxl_release_mapping(struct cxl_context *ctx)
-{
- if (ctx->kernelapi && ctx->mapping)
- simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
-}
-
-static struct file *cxl_getfile(const char *name,
- const struct file_operations *fops,
- void *priv, int flags)
-{
- struct file *file;
- struct inode *inode;
- int rc;
-
- /* strongly inspired by anon_inode_getfile() */
-
- if (fops->owner && !try_module_get(fops->owner))
- return ERR_PTR(-ENOENT);
-
- rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
- if (rc < 0) {
- pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
- file = ERR_PTR(rc);
- goto err_module;
- }
-
- inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
- if (IS_ERR(inode)) {
- file = ERR_CAST(inode);
- goto err_fs;
- }
-
- file = alloc_file_pseudo(inode, cxl_vfs_mount, name,
- flags & (O_ACCMODE | O_NONBLOCK), fops);
- if (IS_ERR(file))
- goto err_inode;
-
- file->private_data = priv;
-
- return file;
-
-err_inode:
- iput(inode);
-err_fs:
- simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
-err_module:
- module_put(fops->owner);
- return file;
-}
-
-struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
-{
- struct cxl_afu *afu;
- struct cxl_context *ctx;
- int rc;
-
- afu = cxl_pci_to_afu(dev);
- if (IS_ERR(afu))
- return ERR_CAST(afu);
-
- ctx = cxl_context_alloc();
- if (!ctx)
- return ERR_PTR(-ENOMEM);
-
- ctx->kernelapi = true;
-
- /* Make it a slave context. We can promote it later? */
- rc = cxl_context_init(ctx, afu, false);
- if (rc)
- goto err_ctx;
-
- return ctx;
-
-err_ctx:
- kfree(ctx);
- return ERR_PTR(rc);
-}
-EXPORT_SYMBOL_GPL(cxl_dev_context_init);
-
-struct cxl_context *cxl_get_context(struct pci_dev *dev)
-{
- return dev->dev.archdata.cxl_ctx;
-}
-EXPORT_SYMBOL_GPL(cxl_get_context);
-
-int cxl_release_context(struct cxl_context *ctx)
-{
- if (ctx->status >= STARTED)
- return -EBUSY;
-
- cxl_context_free(ctx);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_release_context);
-
-static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
-{
- __u16 range;
- int r;
-
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- range = ctx->irqs.range[r];
- if (num < range) {
- return ctx->irqs.offset[r] + num;
- }
- num -= range;
- }
- return 0;
-}
-
-
-int cxl_set_priv(struct cxl_context *ctx, void *priv)
-{
- if (!ctx)
- return -EINVAL;
-
- ctx->priv = priv;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_set_priv);
-
-void *cxl_get_priv(struct cxl_context *ctx)
-{
- if (!ctx)
- return ERR_PTR(-EINVAL);
-
- return ctx->priv;
-}
-EXPORT_SYMBOL_GPL(cxl_get_priv);
-
-int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
-{
- int res;
- irq_hw_number_t hwirq;
-
- if (num == 0)
- num = ctx->afu->pp_irqs;
- res = afu_allocate_irqs(ctx, num);
- if (res)
- return res;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE)) {
- /* In a guest, the PSL interrupt is not multiplexed. It was
- * allocated above, and we need to set its handler
- */
- hwirq = cxl_find_afu_irq(ctx, 0);
- if (hwirq)
- cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
- }
-
- if (ctx->status == STARTED) {
- if (cxl_ops->update_ivtes)
- cxl_ops->update_ivtes(ctx);
- else WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
- }
-
- return res;
-}
-EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
-
-void cxl_free_afu_irqs(struct cxl_context *ctx)
-{
- irq_hw_number_t hwirq;
- unsigned int virq;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE)) {
- hwirq = cxl_find_afu_irq(ctx, 0);
- if (hwirq) {
- virq = irq_find_mapping(NULL, hwirq);
- if (virq)
- cxl_unmap_irq(virq, ctx);
- }
- }
- afu_irq_name_free(ctx);
- cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
-}
-EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
-
-int cxl_map_afu_irq(struct cxl_context *ctx, int num,
- irq_handler_t handler, void *cookie, char *name)
-{
- irq_hw_number_t hwirq;
-
- /*
- * Find interrupt we are to register.
- */
- hwirq = cxl_find_afu_irq(ctx, num);
- if (!hwirq)
- return -ENOENT;
-
- return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
-}
-EXPORT_SYMBOL_GPL(cxl_map_afu_irq);
-
-void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
-{
- irq_hw_number_t hwirq;
- unsigned int virq;
-
- hwirq = cxl_find_afu_irq(ctx, num);
- if (!hwirq)
- return;
-
- virq = irq_find_mapping(NULL, hwirq);
- if (virq)
- cxl_unmap_irq(virq, cookie);
-}
-EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);
-
-/*
- * Start a context
- * Code here similar to afu_ioctl_start_work().
- */
-int cxl_start_context(struct cxl_context *ctx, u64 wed,
- struct task_struct *task)
-{
- int rc = 0;
- bool kernel = true;
-
- pr_devel("%s: pe: %i\n", __func__, ctx->pe);
-
- mutex_lock(&ctx->status_mutex);
- if (ctx->status == STARTED)
- goto out; /* already started */
-
- /*
- * Increment the mapped context count for adapter. This also checks
- * if adapter_context_lock is taken.
- */
- rc = cxl_adapter_context_get(ctx->afu->adapter);
- if (rc)
- goto out;
-
- if (task) {
- ctx->pid = get_task_pid(task, PIDTYPE_PID);
- kernel = false;
-
- /* acquire a reference to the task's mm */
- ctx->mm = get_task_mm(current);
-
- /* ensure this mm_struct can't be freed */
- cxl_context_mm_count_get(ctx);
-
- if (ctx->mm) {
- /* decrement the use count from above */
- mmput(ctx->mm);
- /* make TLBIs for this context global */
- mm_context_add_copro(ctx->mm);
- }
- }
-
- /*
- * Increment driver use count. Enables global TLBIs for hash
- * and callbacks to handle the segment table
- */
- cxl_ctx_get();
-
- /* See the comment in afu_ioctl_start_work() */
- smp_mb();
-
- if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
- put_pid(ctx->pid);
- ctx->pid = NULL;
- cxl_adapter_context_put(ctx->afu->adapter);
- cxl_ctx_put();
- if (task) {
- cxl_context_mm_count_put(ctx);
- if (ctx->mm)
- mm_context_remove_copro(ctx->mm);
- }
- goto out;
- }
-
- ctx->status = STARTED;
-out:
- mutex_unlock(&ctx->status_mutex);
- return rc;
-}
-EXPORT_SYMBOL_GPL(cxl_start_context);
-
-int cxl_process_element(struct cxl_context *ctx)
-{
- return ctx->external_pe;
-}
-EXPORT_SYMBOL_GPL(cxl_process_element);
-
-/* Stop a context. Returns 0 on success, otherwise -Errno */
-int cxl_stop_context(struct cxl_context *ctx)
-{
- return __detach_context(ctx);
-}
-EXPORT_SYMBOL_GPL(cxl_stop_context);
-
-void cxl_set_master(struct cxl_context *ctx)
-{
- ctx->master = true;
-}
-EXPORT_SYMBOL_GPL(cxl_set_master);
-
-/* wrappers around afu_* file ops which are EXPORTED */
-int cxl_fd_open(struct inode *inode, struct file *file)
-{
- return afu_open(inode, file);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_open);
-int cxl_fd_release(struct inode *inode, struct file *file)
-{
- return afu_release(inode, file);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_release);
-long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return afu_ioctl(file, cmd, arg);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
-int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
-{
- return afu_mmap(file, vm);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_mmap);
-__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
-{
- return afu_poll(file, poll);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_poll);
-ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
- loff_t *off)
-{
- return afu_read(file, buf, count, off);
-}
-EXPORT_SYMBOL_GPL(cxl_fd_read);
-
-#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME
-
-/* Get a struct file and fd for a context and attach the ops */
-struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
- int *fd)
-{
- struct file *file;
- int rc, flags, fdtmp;
- char *name = NULL;
-
- /* only allow one per context */
- if (ctx->mapping)
- return ERR_PTR(-EEXIST);
-
- flags = O_RDWR | O_CLOEXEC;
-
- /* This code is similar to anon_inode_getfd() */
- rc = get_unused_fd_flags(flags);
- if (rc < 0)
- return ERR_PTR(rc);
- fdtmp = rc;
-
- /*
-	 * Patch the file ops. Needs to be careful that this is reentrant safe.
- */
- if (fops) {
- PATCH_FOPS(open);
- PATCH_FOPS(poll);
- PATCH_FOPS(read);
- PATCH_FOPS(release);
- PATCH_FOPS(unlocked_ioctl);
- PATCH_FOPS(compat_ioctl);
- PATCH_FOPS(mmap);
- } else /* use default ops */
- fops = (struct file_operations *)&afu_fops;
-
- name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
- file = cxl_getfile(name, fops, ctx, flags);
- kfree(name);
- if (IS_ERR(file))
- goto err_fd;
-
- cxl_context_set_mapping(ctx, file->f_mapping);
- *fd = fdtmp;
- return file;
-
-err_fd:
- put_unused_fd(fdtmp);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(cxl_get_fd);
-
-struct cxl_context *cxl_fops_get_context(struct file *file)
-{
- return file->private_data;
-}
-EXPORT_SYMBOL_GPL(cxl_fops_get_context);
-
-void cxl_set_driver_ops(struct cxl_context *ctx,
- struct cxl_afu_driver_ops *ops)
-{
- WARN_ON(!ops->fetch_event || !ops->event_delivered);
- atomic_set(&ctx->afu_driver_events, 0);
- ctx->afu_driver_ops = ops;
-}
-EXPORT_SYMBOL_GPL(cxl_set_driver_ops);
-
-void cxl_context_events_pending(struct cxl_context *ctx,
- unsigned int new_events)
-{
- atomic_add(new_events, &ctx->afu_driver_events);
- wake_up_all(&ctx->wq);
-}
-EXPORT_SYMBOL_GPL(cxl_context_events_pending);
-
-int cxl_start_work(struct cxl_context *ctx,
- struct cxl_ioctl_start_work *work)
-{
- int rc;
-
- /* code taken from afu_ioctl_start_work */
- if (!(work->flags & CXL_START_WORK_NUM_IRQS))
- work->num_interrupts = ctx->afu->pp_irqs;
- else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
- (work->num_interrupts > ctx->afu->irqs_max)) {
- return -EINVAL;
- }
-
- rc = afu_register_irqs(ctx, work->num_interrupts);
- if (rc)
- return rc;
-
- rc = cxl_start_context(ctx, work->work_element_descriptor, current);
- if (rc < 0) {
- afu_release_irqs(ctx, ctx);
- return rc;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxl_start_work);
-
-void __iomem *cxl_psa_map(struct cxl_context *ctx)
-{
- if (ctx->status != STARTED)
- return NULL;
-
- pr_devel("%s: psn_phys%llx size:%llx\n",
- __func__, ctx->psn_phys, ctx->psn_size);
- return ioremap(ctx->psn_phys, ctx->psn_size);
-}
-EXPORT_SYMBOL_GPL(cxl_psa_map);
-
-void cxl_psa_unmap(void __iomem *addr)
-{
- iounmap(addr);
-}
-EXPORT_SYMBOL_GPL(cxl_psa_unmap);
-
-int cxl_afu_reset(struct cxl_context *ctx)
-{
- struct cxl_afu *afu = ctx->afu;
- int rc;
-
- rc = cxl_ops->afu_reset(afu);
- if (rc)
- return rc;
-
- return cxl_ops->afu_check_and_enable(afu);
-}
-EXPORT_SYMBOL_GPL(cxl_afu_reset);
-
-void cxl_perst_reloads_same_image(struct cxl_afu *afu,
- bool perst_reloads_same_image)
-{
- afu->adapter->perst_same_image = perst_reloads_same_image;
-}
-EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
-
-ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
-{
- struct cxl_afu *afu = cxl_pci_to_afu(dev);
- if (IS_ERR(afu))
- return -ENODEV;
-
- return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
-}
-EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
deleted file mode 100644
index b054562c046e..000000000000
--- a/drivers/misc/cxl/base.c
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/module.h>
-#include <linux/rcupdate.h>
-#include <asm/errno.h>
-#include <misc/cxl-base.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include "cxl.h"
-
-/* protected by rcu */
-static struct cxl_calls *cxl_calls;
-
-atomic_t cxl_use_count = ATOMIC_INIT(0);
-EXPORT_SYMBOL(cxl_use_count);
-
-#ifdef CONFIG_CXL_MODULE
-
-static inline struct cxl_calls *cxl_calls_get(void)
-{
- struct cxl_calls *calls = NULL;
-
- rcu_read_lock();
- calls = rcu_dereference(cxl_calls);
- if (calls && !try_module_get(calls->owner))
- calls = NULL;
- rcu_read_unlock();
-
- return calls;
-}
-
-static inline void cxl_calls_put(struct cxl_calls *calls)
-{
- BUG_ON(calls != cxl_calls);
-
- /* we don't need to rcu this, as we hold a reference to the module */
- module_put(cxl_calls->owner);
-}
-
-#else /* !defined CONFIG_CXL_MODULE */
-
-static inline struct cxl_calls *cxl_calls_get(void)
-{
- return cxl_calls;
-}
-
-static inline void cxl_calls_put(struct cxl_calls *calls) { }
-
-#endif /* CONFIG_CXL_MODULE */
-
-/* AFU refcount management */
-struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
-{
- return (get_device(&afu->dev) == NULL) ? NULL : afu;
-}
-EXPORT_SYMBOL_GPL(cxl_afu_get);
-
-void cxl_afu_put(struct cxl_afu *afu)
-{
- put_device(&afu->dev);
-}
-EXPORT_SYMBOL_GPL(cxl_afu_put);
-
-void cxl_slbia(struct mm_struct *mm)
-{
- struct cxl_calls *calls;
-
- calls = cxl_calls_get();
- if (!calls)
- return;
-
- if (cxl_ctx_in_use())
- calls->cxl_slbia(mm);
-
- cxl_calls_put(calls);
-}
-
-int register_cxl_calls(struct cxl_calls *calls)
-{
- if (cxl_calls)
- return -EBUSY;
-
- rcu_assign_pointer(cxl_calls, calls);
- return 0;
-}
-EXPORT_SYMBOL_GPL(register_cxl_calls);
-
-void unregister_cxl_calls(struct cxl_calls *calls)
-{
- BUG_ON(cxl_calls->owner != calls->owner);
- RCU_INIT_POINTER(cxl_calls, NULL);
- synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(unregister_cxl_calls);
-
-int cxl_update_properties(struct device_node *dn,
- struct property *new_prop)
-{
- return of_update_property(dn, new_prop);
-}
-EXPORT_SYMBOL_GPL(cxl_update_properties);
-
-static int __init cxl_base_init(void)
-{
- struct device_node *np;
- struct platform_device *dev;
- int count = 0;
-
- /*
- * Scan for compatible devices in guest only
- */
- if (cpu_has_feature(CPU_FTR_HVMODE))
- return 0;
-
- for_each_compatible_node(np, NULL, "ibm,coherent-platform-facility") {
- dev = of_platform_device_create(np, NULL, NULL);
- if (dev)
- count++;
- }
- pr_devel("Found %d cxl device(s)\n", count);
- return 0;
-}
-device_initcall(cxl_base_init);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
deleted file mode 100644
index 76b5ea66dfa1..000000000000
--- a/drivers/misc/cxl/context.c
+++ /dev/null
@@ -1,362 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/bitmap.h>
-#include <linux/sched.h>
-#include <linux/pid.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/idr.h>
-#include <linux/sched/mm.h>
-#include <linux/mmu_context.h>
-#include <asm/cputable.h>
-#include <asm/current.h>
-#include <asm/copro.h>
-
-#include "cxl.h"
-
-/*
- * Allocates space for a CXL context.
- */
-struct cxl_context *cxl_context_alloc(void)
-{
- return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
-}
-
-/*
- * Initialises a CXL context.
- */
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
-{
- int i;
-
- ctx->afu = afu;
- ctx->master = master;
- ctx->pid = NULL; /* Set in start work ioctl */
- mutex_init(&ctx->mapping_lock);
- ctx->mapping = NULL;
- ctx->tidr = 0;
- ctx->assign_tidr = false;
-
- if (cxl_is_power8()) {
- spin_lock_init(&ctx->sste_lock);
-
- /*
- * Allocate the segment table before we put it in the IDR so that we
- * can always access it when dereferenced from IDR. For the same
- * reason, the segment table is only destroyed after the context is
- * removed from the IDR. Access to this in the IOCTL is protected by
- * Linux filesystem semantics (can't IOCTL until open is complete).
- */
- i = cxl_alloc_sst(ctx);
- if (i)
- return i;
- }
-
- INIT_WORK(&ctx->fault_work, cxl_handle_fault);
-
- init_waitqueue_head(&ctx->wq);
- spin_lock_init(&ctx->lock);
-
- ctx->irq_bitmap = NULL;
- ctx->pending_irq = false;
- ctx->pending_fault = false;
- ctx->pending_afu_err = false;
-
- INIT_LIST_HEAD(&ctx->irq_names);
-
- /*
- * When we have to destroy all contexts in cxl_context_detach_all() we
- * end up with afu_release_irqs() called from inside a
- * idr_for_each_entry(). Hence we need to make sure that anything
- * dereferenced from this IDR is ok before we allocate the IDR here.
- * This clears out the IRQ ranges to ensure this.
- */
- for (i = 0; i < CXL_IRQ_RANGES; i++)
- ctx->irqs.range[i] = 0;
-
- mutex_init(&ctx->status_mutex);
-
- ctx->status = OPENED;
-
- /*
- * Allocating IDR! We better make sure everything's setup that
- * dereferences from it.
- */
- mutex_lock(&afu->contexts_lock);
- idr_preload(GFP_KERNEL);
- i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
- ctx->afu->num_procs, GFP_NOWAIT);
- idr_preload_end();
- mutex_unlock(&afu->contexts_lock);
- if (i < 0)
- return i;
-
- ctx->pe = i;
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- ctx->elem = &ctx->afu->native->spa[i];
- ctx->external_pe = ctx->pe;
- } else {
- ctx->external_pe = -1; /* assigned when attaching */
- }
- ctx->pe_inserted = false;
-
- /*
- * take a ref on the afu so that it stays alive at-least till
- * this context is reclaimed inside reclaim_ctx.
- */
- cxl_afu_get(afu);
- return 0;
-}
-
-void cxl_context_set_mapping(struct cxl_context *ctx,
- struct address_space *mapping)
-{
- mutex_lock(&ctx->mapping_lock);
- ctx->mapping = mapping;
- mutex_unlock(&ctx->mapping_lock);
-}
-
-static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct cxl_context *ctx = vma->vm_file->private_data;
- u64 area, offset;
- vm_fault_t ret;
-
- offset = vmf->pgoff << PAGE_SHIFT;
-
- pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
- __func__, ctx->pe, vmf->address, offset);
-
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
- area = ctx->afu->psn_phys;
- if (offset >= ctx->afu->adapter->ps_size)
- return VM_FAULT_SIGBUS;
- } else {
- area = ctx->psn_phys;
- if (offset >= ctx->psn_size)
- return VM_FAULT_SIGBUS;
- }
-
- mutex_lock(&ctx->status_mutex);
-
- if (ctx->status != STARTED) {
- mutex_unlock(&ctx->status_mutex);
- pr_devel("%s: Context not started, failing problem state access\n", __func__);
- if (ctx->mmio_err_ff) {
- if (!ctx->ff_page) {
- ctx->ff_page = alloc_page(GFP_USER);
- if (!ctx->ff_page)
- return VM_FAULT_OOM;
- memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
- }
- get_page(ctx->ff_page);
- vmf->page = ctx->ff_page;
- vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
- return 0;
- }
- return VM_FAULT_SIGBUS;
- }
-
- ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
-
- mutex_unlock(&ctx->status_mutex);
-
- return ret;
-}
-
-static const struct vm_operations_struct cxl_mmap_vmops = {
- .fault = cxl_mmap_fault,
-};
-
-/*
- * Map a per-context mmio space into the given vma.
- */
-int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
-{
- u64 start = vma->vm_pgoff << PAGE_SHIFT;
- u64 len = vma->vm_end - vma->vm_start;
-
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
- if (start + len > ctx->afu->adapter->ps_size)
- return -EINVAL;
-
- if (cxl_is_power9()) {
- /*
- * Make sure there is a valid problem state
- * area space for this AFU.
- */
- if (ctx->master && !ctx->afu->psa) {
- pr_devel("AFU doesn't support mmio space\n");
- return -EINVAL;
- }
-
- /* Can't mmap until the AFU is enabled */
- if (!ctx->afu->enabled)
- return -EBUSY;
- }
- } else {
- if (start + len > ctx->psn_size)
- return -EINVAL;
-
- /* Make sure there is a valid per process space for this AFU */
- if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
- pr_devel("AFU doesn't support mmio space\n");
- return -EINVAL;
- }
-
- /* Can't mmap until the AFU is enabled */
- if (!ctx->afu->enabled)
- return -EBUSY;
- }
-
- pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
- ctx->psn_phys, ctx->pe , ctx->master);
-
- vm_flags_set(vma, VM_IO | VM_PFNMAP);
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_ops = &cxl_mmap_vmops;
- return 0;
-}
-
-/*
- * Detach a context from the hardware. This disables interrupts and doesn't
- * return until all outstanding interrupts for this context have completed. The
- * hardware should no longer access *ctx after this has returned.
- */
-int __detach_context(struct cxl_context *ctx)
-{
- enum cxl_context_status status;
-
- mutex_lock(&ctx->status_mutex);
- status = ctx->status;
- ctx->status = CLOSED;
- mutex_unlock(&ctx->status_mutex);
- if (status != STARTED)
- return -EBUSY;
-
- /* Only warn if we detached while the link was OK.
- * If detach fails when hw is down, we don't care.
- */
- WARN_ON(cxl_ops->detach_process(ctx) &&
- cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
- flush_work(&ctx->fault_work); /* Only needed for dedicated process */
-
- /*
- * Wait until no further interrupts are presented by the PSL
- * for this context.
- */
- if (cxl_ops->irq_wait)
- cxl_ops->irq_wait(ctx);
-
- /* release the reference to the group leader and mm handling pid */
- put_pid(ctx->pid);
-
- cxl_ctx_put();
-
- /* Decrease the attached context count on the adapter */
- cxl_adapter_context_put(ctx->afu->adapter);
-
- /* Decrease the mm count on the context */
- cxl_context_mm_count_put(ctx);
- if (ctx->mm)
- mm_context_remove_copro(ctx->mm);
- ctx->mm = NULL;
-
- return 0;
-}
-
-/*
- * Detach the given context from the AFU. This doesn't actually
- * free the context but it should stop the context running in hardware
- * (ie. prevent this context from generating any further interrupts
- * so that it can be freed).
- */
-void cxl_context_detach(struct cxl_context *ctx)
-{
- int rc;
-
- rc = __detach_context(ctx);
- if (rc)
- return;
-
- afu_release_irqs(ctx, ctx);
- wake_up_all(&ctx->wq);
-}
-
-/*
- * Detach all contexts on the given AFU.
- */
-void cxl_context_detach_all(struct cxl_afu *afu)
-{
- struct cxl_context *ctx;
- int tmp;
-
- mutex_lock(&afu->contexts_lock);
- idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
- /*
- * Anything done in here needs to be setup before the IDR is
- * created and torn down after the IDR removed
- */
- cxl_context_detach(ctx);
-
- /*
- * We are force detaching - remove any active PSA mappings so
- * userspace cannot interfere with the card if it comes back.
- * Easiest way to exercise this is to unbind and rebind the
- * driver via sysfs while it is in use.
- */
- mutex_lock(&ctx->mapping_lock);
- if (ctx->mapping)
- unmap_mapping_range(ctx->mapping, 0, 0, 1);
- mutex_unlock(&ctx->mapping_lock);
- }
- mutex_unlock(&afu->contexts_lock);
-}
-
-static void reclaim_ctx(struct rcu_head *rcu)
-{
- struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
-
- if (cxl_is_power8())
- free_page((u64)ctx->sstp);
- if (ctx->ff_page)
- __free_page(ctx->ff_page);
- ctx->sstp = NULL;
-
- bitmap_free(ctx->irq_bitmap);
-
- /* Drop ref to the afu device taken during cxl_context_init */
- cxl_afu_put(ctx->afu);
-
- kfree(ctx);
-}
-
-void cxl_context_free(struct cxl_context *ctx)
-{
- if (ctx->kernelapi && ctx->mapping)
- cxl_release_mapping(ctx);
- mutex_lock(&ctx->afu->contexts_lock);
- idr_remove(&ctx->afu->contexts_idr, ctx->pe);
- mutex_unlock(&ctx->afu->contexts_lock);
- call_rcu(&ctx->rcu, reclaim_ctx);
-}
-
-void cxl_context_mm_count_get(struct cxl_context *ctx)
-{
- if (ctx->mm)
- mmgrab(ctx->mm);
-}
-
-void cxl_context_mm_count_put(struct cxl_context *ctx)
-{
- if (ctx->mm)
- mmdrop(ctx->mm);
-}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
deleted file mode 100644
index 6ad0ab892675..000000000000
--- a/drivers/misc/cxl/cxl.h
+++ /dev/null
@@ -1,1135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#ifndef _CXL_H_
-#define _CXL_H_
-
-#include <linux/interrupt.h>
-#include <linux/semaphore.h>
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/cdev.h>
-#include <linux/pid.h>
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <asm/cputable.h>
-#include <asm/mmu.h>
-#include <asm/reg.h>
-#include <misc/cxl-base.h>
-
-#include <misc/cxl.h>
-#include <uapi/misc/cxl.h>
-
-extern uint cxl_verbose;
-
-struct property;
-
-#define CXL_TIMEOUT 5
-
-/*
- * Bump version each time a user API change is made, whether it is
- * backwards compatible ot not.
- */
-#define CXL_API_VERSION 3
-#define CXL_API_VERSION_COMPATIBLE 1
-
-/*
- * Opaque types to avoid accidentally passing registers for the wrong MMIO
- *
- * At the end of the day, I'm not married to using typedef here, but it might
- * (and has!) help avoid bugs like mixing up CXL_PSL_CtxTime and
- * CXL_PSL_CtxTime_An, or calling cxl_p1n_write instead of cxl_p1_write.
- *
- * I'm quite happy if these are changed back to #defines before upstreaming, it
- * should be little more than a regexp search+replace operation in this file.
- */
-typedef struct {
- const int x;
-} cxl_p1_reg_t;
-typedef struct {
- const int x;
-} cxl_p1n_reg_t;
-typedef struct {
- const int x;
-} cxl_p2n_reg_t;
-#define cxl_reg_off(reg) \
- (reg.x)
-
-/* Memory maps. Ref CXL Appendix A */
-
-/* PSL Privilege 1 Memory Map */
-/* Configuration and Control area - CAIA 1&2 */
-static const cxl_p1_reg_t CXL_PSL_CtxTime = {0x0000};
-static const cxl_p1_reg_t CXL_PSL_ErrIVTE = {0x0008};
-static const cxl_p1_reg_t CXL_PSL_KEY1 = {0x0010};
-static const cxl_p1_reg_t CXL_PSL_KEY2 = {0x0018};
-static const cxl_p1_reg_t CXL_PSL_Control = {0x0020};
-/* Downloading */
-static const cxl_p1_reg_t CXL_PSL_DLCNTL = {0x0060};
-static const cxl_p1_reg_t CXL_PSL_DLADDR = {0x0068};
-
-/* PSL Lookaside Buffer Management Area - CAIA 1 */
-static const cxl_p1_reg_t CXL_PSL_LBISEL = {0x0080};
-static const cxl_p1_reg_t CXL_PSL_SLBIE = {0x0088};
-static const cxl_p1_reg_t CXL_PSL_SLBIA = {0x0090};
-static const cxl_p1_reg_t CXL_PSL_TLBIE = {0x00A0};
-static const cxl_p1_reg_t CXL_PSL_TLBIA = {0x00A8};
-static const cxl_p1_reg_t CXL_PSL_AFUSEL = {0x00B0};
-
-/* 0x00C0:7EFF Implementation dependent area */
-/* PSL registers - CAIA 1 */
-static const cxl_p1_reg_t CXL_PSL_FIR1 = {0x0100};
-static const cxl_p1_reg_t CXL_PSL_FIR2 = {0x0108};
-static const cxl_p1_reg_t CXL_PSL_Timebase = {0x0110};
-static const cxl_p1_reg_t CXL_PSL_VERSION = {0x0118};
-static const cxl_p1_reg_t CXL_PSL_RESLCKTO = {0x0128};
-static const cxl_p1_reg_t CXL_PSL_TB_CTLSTAT = {0x0140};
-static const cxl_p1_reg_t CXL_PSL_FIR_CNTL = {0x0148};
-static const cxl_p1_reg_t CXL_PSL_DSNDCTL = {0x0150};
-static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
-static const cxl_p1_reg_t CXL_PSL_TRACE = {0x0170};
-/* PSL registers - CAIA 2 */
-static const cxl_p1_reg_t CXL_PSL9_CONTROL = {0x0020};
-static const cxl_p1_reg_t CXL_XSL9_INV = {0x0110};
-static const cxl_p1_reg_t CXL_XSL9_DBG = {0x0130};
-static const cxl_p1_reg_t CXL_XSL9_DEF = {0x0140};
-static const cxl_p1_reg_t CXL_XSL9_DSNCTL = {0x0168};
-static const cxl_p1_reg_t CXL_PSL9_FIR1 = {0x0300};
-static const cxl_p1_reg_t CXL_PSL9_FIR_MASK = {0x0308};
-static const cxl_p1_reg_t CXL_PSL9_Timebase = {0x0310};
-static const cxl_p1_reg_t CXL_PSL9_DEBUG = {0x0320};
-static const cxl_p1_reg_t CXL_PSL9_FIR_CNTL = {0x0348};
-static const cxl_p1_reg_t CXL_PSL9_DSNDCTL = {0x0350};
-static const cxl_p1_reg_t CXL_PSL9_TB_CTLSTAT = {0x0340};
-static const cxl_p1_reg_t CXL_PSL9_TRACECFG = {0x0368};
-static const cxl_p1_reg_t CXL_PSL9_APCDEDALLOC = {0x0378};
-static const cxl_p1_reg_t CXL_PSL9_APCDEDTYPE = {0x0380};
-static const cxl_p1_reg_t CXL_PSL9_TNR_ADDR = {0x0388};
-static const cxl_p1_reg_t CXL_PSL9_CTCCFG = {0x0390};
-static const cxl_p1_reg_t CXL_PSL9_GP_CT = {0x0398};
-static const cxl_p1_reg_t CXL_XSL9_IERAT = {0x0588};
-static const cxl_p1_reg_t CXL_XSL9_ILPP = {0x0590};
-
-/* 0x7F00:7FFF Reserved PCIe MSI-X Pending Bit Array area */
-/* 0x8000:FFFF Reserved PCIe MSI-X Table Area */
-
-/* PSL Slice Privilege 1 Memory Map */
-/* Configuration Area - CAIA 1&2 */
-static const cxl_p1n_reg_t CXL_PSL_SR_An = {0x00};
-static const cxl_p1n_reg_t CXL_PSL_LPID_An = {0x08};
-static const cxl_p1n_reg_t CXL_PSL_AMBAR_An = {0x10};
-static const cxl_p1n_reg_t CXL_PSL_SPOffset_An = {0x18};
-static const cxl_p1n_reg_t CXL_PSL_ID_An = {0x20};
-static const cxl_p1n_reg_t CXL_PSL_SERR_An = {0x28};
-/* Memory Management and Lookaside Buffer Management - CAIA 1*/
-static const cxl_p1n_reg_t CXL_PSL_SDR_An = {0x30};
-/* Memory Management and Lookaside Buffer Management - CAIA 1&2 */
-static const cxl_p1n_reg_t CXL_PSL_AMOR_An = {0x38};
-/* Pointer Area - CAIA 1&2 */
-static const cxl_p1n_reg_t CXL_HAURP_An = {0x80};
-static const cxl_p1n_reg_t CXL_PSL_SPAP_An = {0x88};
-static const cxl_p1n_reg_t CXL_PSL_LLCMD_An = {0x90};
-/* Control Area - CAIA 1&2 */
-static const cxl_p1n_reg_t CXL_PSL_SCNTL_An = {0xA0};
-static const cxl_p1n_reg_t CXL_PSL_CtxTime_An = {0xA8};
-static const cxl_p1n_reg_t CXL_PSL_IVTE_Offset_An = {0xB0};
-static const cxl_p1n_reg_t CXL_PSL_IVTE_Limit_An = {0xB8};
-/* 0xC0:FF Implementation Dependent Area - CAIA 1&2 */
-static const cxl_p1n_reg_t CXL_PSL_FIR_SLICE_An = {0xC0};
-static const cxl_p1n_reg_t CXL_AFU_DEBUG_An = {0xC8};
-/* 0xC0:FF Implementation Dependent Area - CAIA 1 */
-static const cxl_p1n_reg_t CXL_PSL_APCALLOC_A = {0xD0};
-static const cxl_p1n_reg_t CXL_PSL_COALLOC_A = {0xD8};
-static const cxl_p1n_reg_t CXL_PSL_RXCTL_A = {0xE0};
-static const cxl_p1n_reg_t CXL_PSL_SLICE_TRACE = {0xE8};
-
-/* PSL Slice Privilege 2 Memory Map */
-/* Configuration and Control Area - CAIA 1&2 */
-static const cxl_p2n_reg_t CXL_PSL_PID_TID_An = {0x000};
-static const cxl_p2n_reg_t CXL_CSRP_An = {0x008};
-/* Configuration and Control Area - CAIA 1 */
-static const cxl_p2n_reg_t CXL_AURP0_An = {0x010};
-static const cxl_p2n_reg_t CXL_AURP1_An = {0x018};
-static const cxl_p2n_reg_t CXL_SSTP0_An = {0x020};
-static const cxl_p2n_reg_t CXL_SSTP1_An = {0x028};
-/* Configuration and Control Area - CAIA 1 */
-static const cxl_p2n_reg_t CXL_PSL_AMR_An = {0x030};
-/* Segment Lookaside Buffer Management - CAIA 1 */
-static const cxl_p2n_reg_t CXL_SLBIE_An = {0x040};
-static const cxl_p2n_reg_t CXL_SLBIA_An = {0x048};
-static const cxl_p2n_reg_t CXL_SLBI_Select_An = {0x050};
-/* Interrupt Registers - CAIA 1&2 */
-static const cxl_p2n_reg_t CXL_PSL_DSISR_An = {0x060};
-static const cxl_p2n_reg_t CXL_PSL_DAR_An = {0x068};
-static const cxl_p2n_reg_t CXL_PSL_DSR_An = {0x070};
-static const cxl_p2n_reg_t CXL_PSL_TFC_An = {0x078};
-static const cxl_p2n_reg_t CXL_PSL_PEHandle_An = {0x080};
-static const cxl_p2n_reg_t CXL_PSL_ErrStat_An = {0x088};
-/* AFU Registers - CAIA 1&2 */
-static const cxl_p2n_reg_t CXL_AFU_Cntl_An = {0x090};
-static const cxl_p2n_reg_t CXL_AFU_ERR_An = {0x098};
-/* Work Element Descriptor - CAIA 1&2 */
-static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
-/* 0x0C0:FFF Implementation Dependent Area */
-
-#define CXL_PSL_SPAP_Addr 0x0ffffffffffff000ULL
-#define CXL_PSL_SPAP_Size 0x0000000000000ff0ULL
-#define CXL_PSL_SPAP_Size_Shift 4
-#define CXL_PSL_SPAP_V 0x0000000000000001ULL
-
-/****** CXL_PSL_Control ****************************************************/
-#define CXL_PSL_Control_tb (0x1ull << (63-63))
-#define CXL_PSL_Control_Fr (0x1ull << (63-31))
-#define CXL_PSL_Control_Fs_MASK (0x3ull << (63-29))
-#define CXL_PSL_Control_Fs_Complete (0x3ull << (63-29))
-
-/****** CXL_PSL_DLCNTL *****************************************************/
-#define CXL_PSL_DLCNTL_D (0x1ull << (63-28))
-#define CXL_PSL_DLCNTL_C (0x1ull << (63-29))
-#define CXL_PSL_DLCNTL_E (0x1ull << (63-30))
-#define CXL_PSL_DLCNTL_S (0x1ull << (63-31))
-#define CXL_PSL_DLCNTL_CE (CXL_PSL_DLCNTL_C | CXL_PSL_DLCNTL_E)
-#define CXL_PSL_DLCNTL_DCES (CXL_PSL_DLCNTL_D | CXL_PSL_DLCNTL_CE | CXL_PSL_DLCNTL_S)
-
-/****** CXL_PSL_SR_An ******************************************************/
-#define CXL_PSL_SR_An_SF MSR_SF /* 64bit */
-#define CXL_PSL_SR_An_TA (1ull << (63-1)) /* Tags active, GA1: 0 */
-#define CXL_PSL_SR_An_HV MSR_HV /* Hypervisor, GA1: 0 */
-#define CXL_PSL_SR_An_XLAT_hpt (0ull << (63-6))/* Hashed page table (HPT) mode */
-#define CXL_PSL_SR_An_XLAT_roh (2ull << (63-6))/* Radix on HPT mode */
-#define CXL_PSL_SR_An_XLAT_ror (3ull << (63-6))/* Radix on Radix mode */
-#define CXL_PSL_SR_An_BOT (1ull << (63-10)) /* Use the in-memory segment table */
-#define CXL_PSL_SR_An_PR MSR_PR /* Problem state, GA1: 1 */
-#define CXL_PSL_SR_An_ISL (1ull << (63-53)) /* Ignore Segment Large Page */
-#define CXL_PSL_SR_An_TC (1ull << (63-54)) /* Page Table secondary hash */
-#define CXL_PSL_SR_An_US (1ull << (63-56)) /* User state, GA1: X */
-#define CXL_PSL_SR_An_SC (1ull << (63-58)) /* Segment Table secondary hash */
-#define CXL_PSL_SR_An_R MSR_DR /* Relocate, GA1: 1 */
-#define CXL_PSL_SR_An_MP (1ull << (63-62)) /* Master Process */
-#define CXL_PSL_SR_An_LE (1ull << (63-63)) /* Little Endian */
-
-/****** CXL_PSL_ID_An ****************************************************/
-#define CXL_PSL_ID_An_F (1ull << (63-31))
-#define CXL_PSL_ID_An_L (1ull << (63-30))
-
-/****** CXL_PSL_SERR_An ****************************************************/
-#define CXL_PSL_SERR_An_afuto (1ull << (63-0))
-#define CXL_PSL_SERR_An_afudis (1ull << (63-1))
-#define CXL_PSL_SERR_An_afuov (1ull << (63-2))
-#define CXL_PSL_SERR_An_badsrc (1ull << (63-3))
-#define CXL_PSL_SERR_An_badctx (1ull << (63-4))
-#define CXL_PSL_SERR_An_llcmdis (1ull << (63-5))
-#define CXL_PSL_SERR_An_llcmdto (1ull << (63-6))
-#define CXL_PSL_SERR_An_afupar (1ull << (63-7))
-#define CXL_PSL_SERR_An_afudup (1ull << (63-8))
-#define CXL_PSL_SERR_An_IRQS ( \
- CXL_PSL_SERR_An_afuto | CXL_PSL_SERR_An_afudis | CXL_PSL_SERR_An_afuov | \
- CXL_PSL_SERR_An_badsrc | CXL_PSL_SERR_An_badctx | CXL_PSL_SERR_An_llcmdis | \
- CXL_PSL_SERR_An_llcmdto | CXL_PSL_SERR_An_afupar | CXL_PSL_SERR_An_afudup)
-#define CXL_PSL_SERR_An_afuto_mask (1ull << (63-32))
-#define CXL_PSL_SERR_An_afudis_mask (1ull << (63-33))
-#define CXL_PSL_SERR_An_afuov_mask (1ull << (63-34))
-#define CXL_PSL_SERR_An_badsrc_mask (1ull << (63-35))
-#define CXL_PSL_SERR_An_badctx_mask (1ull << (63-36))
-#define CXL_PSL_SERR_An_llcmdis_mask (1ull << (63-37))
-#define CXL_PSL_SERR_An_llcmdto_mask (1ull << (63-38))
-#define CXL_PSL_SERR_An_afupar_mask (1ull << (63-39))
-#define CXL_PSL_SERR_An_afudup_mask (1ull << (63-40))
-#define CXL_PSL_SERR_An_IRQ_MASKS ( \
- CXL_PSL_SERR_An_afuto_mask | CXL_PSL_SERR_An_afudis_mask | CXL_PSL_SERR_An_afuov_mask | \
- CXL_PSL_SERR_An_badsrc_mask | CXL_PSL_SERR_An_badctx_mask | CXL_PSL_SERR_An_llcmdis_mask | \
- CXL_PSL_SERR_An_llcmdto_mask | CXL_PSL_SERR_An_afupar_mask | CXL_PSL_SERR_An_afudup_mask)
-
-#define CXL_PSL_SERR_An_AE (1ull << (63-30))
-
-/****** CXL_PSL_SCNTL_An ****************************************************/
-#define CXL_PSL_SCNTL_An_CR (0x1ull << (63-15))
-/* Programming Modes: */
-#define CXL_PSL_SCNTL_An_PM_MASK (0xffffull << (63-31))
-#define CXL_PSL_SCNTL_An_PM_Shared (0x0000ull << (63-31))
-#define CXL_PSL_SCNTL_An_PM_OS (0x0001ull << (63-31))
-#define CXL_PSL_SCNTL_An_PM_Process (0x0002ull << (63-31))
-#define CXL_PSL_SCNTL_An_PM_AFU (0x0004ull << (63-31))
-#define CXL_PSL_SCNTL_An_PM_AFU_PBT (0x0104ull << (63-31))
-/* Purge Status (ro) */
-#define CXL_PSL_SCNTL_An_Ps_MASK (0x3ull << (63-39))
-#define CXL_PSL_SCNTL_An_Ps_Pending (0x1ull << (63-39))
-#define CXL_PSL_SCNTL_An_Ps_Complete (0x3ull << (63-39))
-/* Purge */
-#define CXL_PSL_SCNTL_An_Pc (0x1ull << (63-48))
-/* Suspend Status (ro) */
-#define CXL_PSL_SCNTL_An_Ss_MASK (0x3ull << (63-55))
-#define CXL_PSL_SCNTL_An_Ss_Pending (0x1ull << (63-55))
-#define CXL_PSL_SCNTL_An_Ss_Complete (0x3ull << (63-55))
-/* Suspend Control */
-#define CXL_PSL_SCNTL_An_Sc (0x1ull << (63-63))
-
-/* AFU Slice Enable Status (ro) */
-#define CXL_AFU_Cntl_An_ES_MASK (0x7ull << (63-2))
-#define CXL_AFU_Cntl_An_ES_Disabled (0x0ull << (63-2))
-#define CXL_AFU_Cntl_An_ES_Enabled (0x4ull << (63-2))
-/* AFU Slice Enable */
-#define CXL_AFU_Cntl_An_E (0x1ull << (63-3))
-/* AFU Slice Reset status (ro) */
-#define CXL_AFU_Cntl_An_RS_MASK (0x3ull << (63-5))
-#define CXL_AFU_Cntl_An_RS_Pending (0x1ull << (63-5))
-#define CXL_AFU_Cntl_An_RS_Complete (0x2ull << (63-5))
-/* AFU Slice Reset */
-#define CXL_AFU_Cntl_An_RA (0x1ull << (63-7))
-
-/****** CXL_SSTP0/1_An ******************************************************/
-/* These top bits are for the segment that CONTAINS the segment table */
-#define CXL_SSTP0_An_B_SHIFT SLB_VSID_SSIZE_SHIFT
-#define CXL_SSTP0_An_KS (1ull << (63-2))
-#define CXL_SSTP0_An_KP (1ull << (63-3))
-#define CXL_SSTP0_An_N (1ull << (63-4))
-#define CXL_SSTP0_An_L (1ull << (63-5))
-#define CXL_SSTP0_An_C (1ull << (63-6))
-#define CXL_SSTP0_An_TA (1ull << (63-7))
-#define CXL_SSTP0_An_LP_SHIFT (63-9) /* 2 Bits */
-/* And finally, the virtual address & size of the segment table: */
-#define CXL_SSTP0_An_SegTableSize_SHIFT (63-31) /* 12 Bits */
-#define CXL_SSTP0_An_SegTableSize_MASK \
- (((1ull << 12) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT)
-#define CXL_SSTP0_An_STVA_U_MASK ((1ull << (63-49))-1)
-#define CXL_SSTP1_An_STVA_L_MASK (~((1ull << (63-55))-1))
-#define CXL_SSTP1_An_V (1ull << (63-63))
-
-/****** CXL_PSL_SLBIE_[An] - CAIA 1 **************************************************/
-/* write: */
-#define CXL_SLBIE_C PPC_BIT(36) /* Class */
-#define CXL_SLBIE_SS PPC_BITMASK(37, 38) /* Segment Size */
-#define CXL_SLBIE_SS_SHIFT PPC_BITLSHIFT(38)
-#define CXL_SLBIE_TA PPC_BIT(38) /* Tags Active */
-/* read: */
-#define CXL_SLBIE_MAX PPC_BITMASK(24, 31)
-#define CXL_SLBIE_PENDING PPC_BITMASK(56, 63)
-
-/****** Common to all CXL_TLBIA/SLBIA_[An] - CAIA 1 **********************************/
-#define CXL_TLB_SLB_P (1ull) /* Pending (read) */
-
-/****** Common to all CXL_TLB/SLB_IA/IE_[An] registers - CAIA 1 **********************/
-#define CXL_TLB_SLB_IQ_ALL (0ull) /* Inv qualifier */
-#define CXL_TLB_SLB_IQ_LPID (1ull) /* Inv qualifier */
-#define CXL_TLB_SLB_IQ_LPIDPID (3ull) /* Inv qualifier */
-
-/****** CXL_PSL_AFUSEL ******************************************************/
-#define CXL_PSL_AFUSEL_A (1ull << (63-55)) /* Adapter wide invalidates affect all AFUs */
-
-/****** CXL_PSL_DSISR_An - CAIA 1 ****************************************************/
-#define CXL_PSL_DSISR_An_DS (1ull << (63-0)) /* Segment not found */
-#define CXL_PSL_DSISR_An_DM (1ull << (63-1)) /* PTE not found (See also: M) or protection fault */
-#define CXL_PSL_DSISR_An_ST (1ull << (63-2)) /* Segment Table PTE not found */
-#define CXL_PSL_DSISR_An_UR (1ull << (63-3)) /* AURP PTE not found */
-#define CXL_PSL_DSISR_TRANS (CXL_PSL_DSISR_An_DS | CXL_PSL_DSISR_An_DM | CXL_PSL_DSISR_An_ST | CXL_PSL_DSISR_An_UR)
-#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
-#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
-#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
-#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
-/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
-#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
-#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
-#define CXL_PSL_DSISR_An_A (1ull << (63-37)) /* AFU lock access to write through or cache inhibited storage */
-#define CXL_PSL_DSISR_An_S DSISR_ISSTORE /* Access was afu_wr or afu_zero */
-#define CXL_PSL_DSISR_An_K DSISR_KEYFAULT /* Access not permitted by virtual page class key protection */
-
-/****** CXL_PSL_DSISR_An - CAIA 2 ****************************************************/
-#define CXL_PSL9_DSISR_An_TF (1ull << (63-3)) /* Translation fault */
-#define CXL_PSL9_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
-#define CXL_PSL9_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
-#define CXL_PSL9_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
-#define CXL_PSL9_DSISR_An_S (1ull << (63-38)) /* TF for a write operation */
-#define CXL_PSL9_DSISR_PENDING (CXL_PSL9_DSISR_An_TF | CXL_PSL9_DSISR_An_PE | CXL_PSL9_DSISR_An_AE | CXL_PSL9_DSISR_An_OC)
-/*
- * NOTE: Bits 56:63 (Checkout Response Status) are valid when DSISR_An[TF] = 1
- * Status (0:7) Encoding
- */
-#define CXL_PSL9_DSISR_An_CO_MASK 0x00000000000000ffULL
-#define CXL_PSL9_DSISR_An_SF 0x0000000000000080ULL /* Segment Fault 0b10000000 */
-#define CXL_PSL9_DSISR_An_PF_SLR 0x0000000000000088ULL /* PTE not found (Single Level Radix) 0b10001000 */
-#define CXL_PSL9_DSISR_An_PF_RGC 0x000000000000008CULL /* PTE not found (Radix Guest (child)) 0b10001100 */
-#define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */
-#define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */
-#define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */
-#define CXL_PSL9_DSISR_An_URTCH 0x00000000000000B4ULL /* Unsupported Radix Tree Configuration 0b10110100 */
-
-/****** CXL_PSL_TFC_An ******************************************************/
-#define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */
-#define CXL_PSL_TFC_An_C (1ull << (63-29)) /* Continue (abort transaction) */
-#define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */
-#define CXL_PSL_TFC_An_R (1ull << (63-31)) /* Restart PSL transaction */
-
-/****** CXL_PSL_DEBUG *****************************************************/
-#define CXL_PSL_DEBUG_CDC (1ull << (63-27)) /* Coherent Data cache support */
-
-/****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/
-#define CXL_XSL9_IERAT_MLPID (1ull << (63-0)) /* Match LPID */
-#define CXL_XSL9_IERAT_MPID (1ull << (63-1)) /* Match PID */
-#define CXL_XSL9_IERAT_PRS (1ull << (63-4)) /* PRS bit for Radix invalidations */
-#define CXL_XSL9_IERAT_INVR (1ull << (63-3)) /* Invalidate Radix */
-#define CXL_XSL9_IERAT_IALL (1ull << (63-8)) /* Invalidate All */
-#define CXL_XSL9_IERAT_IINPROG (1ull << (63-63)) /* Invalidate in progress */
-
-/* cxl_process_element->software_status */
-#define CXL_PE_SOFTWARE_STATE_V (1ul << (31 - 0)) /* Valid */
-#define CXL_PE_SOFTWARE_STATE_C (1ul << (31 - 29)) /* Complete */
-#define CXL_PE_SOFTWARE_STATE_S (1ul << (31 - 30)) /* Suspend */
-#define CXL_PE_SOFTWARE_STATE_T (1ul << (31 - 31)) /* Terminate */
-
-/****** CXL_PSL_RXCTL_An (Implementation Specific) **************************
- * Controls AFU Hang Pulse, which sets the timeout for the AFU to respond to
- * the PSL for any response (except MMIO). Timeouts will occur between 1x to 2x
- * of the hang pulse frequency.
- */
-#define CXL_PSL_RXCTL_AFUHP_4S 0x7000000000000000ULL
-
-/* SPA->sw_command_status */
-#define CXL_SPA_SW_CMD_MASK 0xffff000000000000ULL
-#define CXL_SPA_SW_CMD_TERMINATE 0x0001000000000000ULL
-#define CXL_SPA_SW_CMD_REMOVE 0x0002000000000000ULL
-#define CXL_SPA_SW_CMD_SUSPEND 0x0003000000000000ULL
-#define CXL_SPA_SW_CMD_RESUME 0x0004000000000000ULL
-#define CXL_SPA_SW_CMD_ADD 0x0005000000000000ULL
-#define CXL_SPA_SW_CMD_UPDATE 0x0006000000000000ULL
-#define CXL_SPA_SW_STATE_MASK 0x0000ffff00000000ULL
-#define CXL_SPA_SW_STATE_TERMINATED 0x0000000100000000ULL
-#define CXL_SPA_SW_STATE_REMOVED 0x0000000200000000ULL
-#define CXL_SPA_SW_STATE_SUSPENDED 0x0000000300000000ULL
-#define CXL_SPA_SW_STATE_RESUMED 0x0000000400000000ULL
-#define CXL_SPA_SW_STATE_ADDED 0x0000000500000000ULL
-#define CXL_SPA_SW_STATE_UPDATED 0x0000000600000000ULL
-#define CXL_SPA_SW_PSL_ID_MASK 0x00000000ffff0000ULL
-#define CXL_SPA_SW_LINK_MASK 0x000000000000ffffULL
-
-#define CXL_MAX_SLICES 4
-#define MAX_AFU_MMIO_REGS 3
-
-#define CXL_MODE_TIME_SLICED 0x4
-#define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED)
-
-#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
-#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
-#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
-
-#define CXL_PSL9_TRACEID_MAX 0xAU
-#define CXL_PSL9_TRACESTATE_FIN 0x3U
-
-enum cxl_context_status {
- CLOSED,
- OPENED,
- STARTED
-};
-
-enum prefault_modes {
- CXL_PREFAULT_NONE,
- CXL_PREFAULT_WED,
- CXL_PREFAULT_ALL,
-};
-
-enum cxl_attrs {
- CXL_ADAPTER_ATTRS,
- CXL_AFU_MASTER_ATTRS,
- CXL_AFU_ATTRS,
-};
-
-struct cxl_sste {
- __be64 esid_data;
- __be64 vsid_data;
-};
-
-#define to_cxl_adapter(d) container_of(d, struct cxl, dev)
-#define to_cxl_afu(d) container_of(d, struct cxl_afu, dev)
-
-struct cxl_afu_native {
- void __iomem *p1n_mmio;
- void __iomem *afu_desc_mmio;
- irq_hw_number_t psl_hwirq;
- unsigned int psl_virq;
- struct mutex spa_mutex;
- /*
- * Only the first part of the SPA is used for the process element
- * linked list. The only other part that software needs to worry about
- * is sw_command_status, which we store a separate pointer to.
- * Everything else in the SPA is only used by hardware
- */
- struct cxl_process_element *spa;
- __be64 *sw_command_status;
- unsigned int spa_size;
- int spa_order;
- int spa_max_procs;
- u64 pp_offset;
-};
-
-struct cxl_afu_guest {
- struct cxl_afu *parent;
- u64 handle;
- phys_addr_t p2n_phys;
- u64 p2n_size;
- int max_ints;
- bool handle_err;
- struct delayed_work work_err;
- int previous_state;
-};
-
-struct cxl_afu {
- struct cxl_afu_native *native;
- struct cxl_afu_guest *guest;
- irq_hw_number_t serr_hwirq;
- unsigned int serr_virq;
- char *psl_irq_name;
- char *err_irq_name;
- void __iomem *p2n_mmio;
- phys_addr_t psn_phys;
- u64 pp_size;
-
- struct cxl *adapter;
- struct device dev;
- struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d;
- struct device *chardev_s, *chardev_m, *chardev_d;
- struct idr contexts_idr;
- struct dentry *debugfs;
- struct mutex contexts_lock;
- spinlock_t afu_cntl_lock;
-
- /* -1: AFU deconfigured/locked, >= 0: number of readers */
- atomic_t configured_state;
-
- /* AFU error buffer fields and bin attribute for sysfs */
- u64 eb_len, eb_offset;
- struct bin_attribute attr_eb;
-
- /* pointer to the vphb */
- struct pci_controller *phb;
-
- int pp_irqs;
- int irqs_max;
- int num_procs;
- int max_procs_virtualised;
- int slice;
- int modes_supported;
- int current_mode;
- int crs_num;
- u64 crs_len;
- u64 crs_offset;
- struct list_head crs;
- enum prefault_modes prefault_mode;
- bool psa;
- bool pp_psa;
- bool enabled;
-};
-
-
-struct cxl_irq_name {
- struct list_head list;
- char *name;
-};
-
-struct irq_avail {
- irq_hw_number_t offset;
- irq_hw_number_t range;
- unsigned long *bitmap;
-};
-
-/*
- * This is a cxl context. If the PSL is in dedicated mode, there will be one
- * of these per AFU. If in AFU directed there can be lots of these.
- */
-struct cxl_context {
- struct cxl_afu *afu;
-
- /* Problem state MMIO */
- phys_addr_t psn_phys;
- u64 psn_size;
-
- /* Used to unmap any mmaps when force detaching */
- struct address_space *mapping;
- struct mutex mapping_lock;
- struct page *ff_page;
- bool mmio_err_ff;
- bool kernelapi;
-
- spinlock_t sste_lock; /* Protects segment table entries */
- struct cxl_sste *sstp;
- u64 sstp0, sstp1;
- unsigned int sst_size, sst_lru;
-
- wait_queue_head_t wq;
- /* use mm context associated with this pid for ds faults */
- struct pid *pid;
- spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
- /* Only used in PR mode */
- u64 process_token;
-
- /* driver private data */
- void *priv;
-
- unsigned long *irq_bitmap; /* Accessed from IRQ context */
- struct cxl_irq_ranges irqs;
- struct list_head irq_names;
- u64 fault_addr;
- u64 fault_dsisr;
- u64 afu_err;
-
- /*
-	 * This status and its lock protect context start and detach
-	 * from racing with each other. They also prevent detach from
-	 * racing with itself.
- */
- enum cxl_context_status status;
- struct mutex status_mutex;
-
-
- /* XXX: Is it possible to need multiple work items at once? */
- struct work_struct fault_work;
- u64 dsisr;
- u64 dar;
-
- struct cxl_process_element *elem;
-
- /*
- * pe is the process element handle, assigned by this driver when the
- * context is initialized.
- *
- * external_pe is the PE shown outside of cxl.
- * On bare-metal, pe=external_pe, because we decide what the handle is.
- * In a guest, we only find out about the pe used by pHyp when the
- * context is attached, and that's the value we want to report outside
- * of cxl.
- */
- int pe;
- int external_pe;
-
- u32 irq_count;
- bool pe_inserted;
- bool master;
- bool kernel;
- bool pending_irq;
- bool pending_fault;
- bool pending_afu_err;
-
- /* Used by AFU drivers for driver specific event delivery */
- struct cxl_afu_driver_ops *afu_driver_ops;
- atomic_t afu_driver_events;
-
- struct rcu_head rcu;
-
- struct mm_struct *mm;
-
- u16 tidr;
- bool assign_tidr;
-};
-
-struct cxl_irq_info;
-
-struct cxl_service_layer_ops {
- int (*adapter_regs_init)(struct cxl *adapter, struct pci_dev *dev);
- int (*invalidate_all)(struct cxl *adapter);
- int (*afu_regs_init)(struct cxl_afu *afu);
- int (*sanitise_afu_regs)(struct cxl_afu *afu);
- int (*register_serr_irq)(struct cxl_afu *afu);
- void (*release_serr_irq)(struct cxl_afu *afu);
- irqreturn_t (*handle_interrupt)(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
- irqreturn_t (*fail_irq)(struct cxl_afu *afu, struct cxl_irq_info *irq_info);
- int (*activate_dedicated_process)(struct cxl_afu *afu);
- int (*attach_afu_directed)(struct cxl_context *ctx, u64 wed, u64 amr);
- int (*attach_dedicated_process)(struct cxl_context *ctx, u64 wed, u64 amr);
- void (*update_dedicated_ivtes)(struct cxl_context *ctx);
- void (*debugfs_add_adapter_regs)(struct cxl *adapter, struct dentry *dir);
- void (*debugfs_add_afu_regs)(struct cxl_afu *afu, struct dentry *dir);
- void (*psl_irq_dump_registers)(struct cxl_context *ctx);
- void (*err_irq_dump_registers)(struct cxl *adapter);
- void (*debugfs_stop_trace)(struct cxl *adapter);
- void (*write_timebase_ctrl)(struct cxl *adapter);
- u64 (*timebase_read)(struct cxl *adapter);
- int capi_mode;
- bool needs_reset_before_disable;
-};
-
-struct cxl_native {
- u64 afu_desc_off;
- u64 afu_desc_size;
- void __iomem *p1_mmio;
- void __iomem *p2_mmio;
- irq_hw_number_t err_hwirq;
- unsigned int err_virq;
- u64 ps_off;
- bool no_data_cache; /* set if no data cache on the card */
- const struct cxl_service_layer_ops *sl_ops;
-};
-
-struct cxl_guest {
- struct platform_device *pdev;
- int irq_nranges;
- struct cdev cdev;
- irq_hw_number_t irq_base_offset;
- struct irq_avail *irq_avail;
- spinlock_t irq_alloc_lock;
- u64 handle;
- char *status;
- u16 vendor;
- u16 device;
- u16 subsystem_vendor;
- u16 subsystem;
-};
-
-struct cxl {
- struct cxl_native *native;
- struct cxl_guest *guest;
- spinlock_t afu_list_lock;
- struct cxl_afu *afu[CXL_MAX_SLICES];
- struct device dev;
- struct dentry *trace;
- struct dentry *psl_err_chk;
- struct dentry *debugfs;
- char *irq_name;
- struct bin_attribute cxl_attr;
- int adapter_num;
- int user_irqs;
- u64 ps_size;
- u16 psl_rev;
- u16 base_image;
- u8 vsec_status;
- u8 caia_major;
- u8 caia_minor;
- u8 slices;
- bool user_image_loaded;
- bool perst_loads_image;
- bool perst_select_user;
- bool perst_same_image;
- bool psl_timebase_synced;
- bool tunneled_ops_supported;
-
- /*
-	 * Number of contexts mapped onto this card. Possible values are:
-	 * >0: Number of contexts mapped and new ones can be mapped.
- * 0: No active contexts and new ones can be mapped.
- * -1: No contexts mapped and new ones cannot be mapped.
- */
- atomic_t contexts_num;
-};
-
-int cxl_pci_alloc_one_irq(struct cxl *adapter);
-void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq);
-int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
-void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
-int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
-int cxl_update_image_control(struct cxl *adapter);
-int cxl_pci_reset(struct cxl *adapter);
-void cxl_pci_release_afu(struct device *dev);
-ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
-
-/* common == phyp + powernv - CAIA 1&2 */
-struct cxl_process_element_common {
- __be32 tid;
- __be32 pid;
- __be64 csrp;
- union {
- struct {
- __be64 aurp0;
- __be64 aurp1;
- __be64 sstp0;
- __be64 sstp1;
- } psl8; /* CAIA 1 */
- struct {
- u8 reserved2[8];
- u8 reserved3[8];
- u8 reserved4[8];
- u8 reserved5[8];
- } psl9; /* CAIA 2 */
- } u;
- __be64 amr;
- u8 reserved6[4];
- __be64 wed;
-} __packed;
-
-/* just powernv - CAIA 1&2 */
-struct cxl_process_element {
- __be64 sr;
- __be64 SPOffset;
- union {
- __be64 sdr; /* CAIA 1 */
- u8 reserved1[8]; /* CAIA 2 */
- } u;
- __be64 haurp;
- __be32 ctxtime;
- __be16 ivte_offsets[4];
- __be16 ivte_ranges[4];
- __be32 lpid;
- struct cxl_process_element_common common;
- __be32 software_state;
-} __packed;
-
-static inline bool cxl_adapter_link_ok(struct cxl *cxl, struct cxl_afu *afu)
-{
- struct pci_dev *pdev;
-
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- pdev = to_pci_dev(cxl->dev.parent);
- return !pci_channel_offline(pdev);
- }
- return true;
-}
-
-static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
-{
- WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
- return cxl->native->p1_mmio + cxl_reg_off(reg);
-}
-
-static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val)
-{
- if (likely(cxl_adapter_link_ok(cxl, NULL)))
- out_be64(_cxl_p1_addr(cxl, reg), val);
-}
-
-static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
-{
- if (likely(cxl_adapter_link_ok(cxl, NULL)))
- return in_be64(_cxl_p1_addr(cxl, reg));
- else
- return ~0ULL;
-}
-
-static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
-{
- WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
- return afu->native->p1n_mmio + cxl_reg_off(reg);
-}
-
-static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
- out_be64(_cxl_p1n_addr(afu, reg), val);
-}
-
-static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
- return in_be64(_cxl_p1n_addr(afu, reg));
- else
- return ~0ULL;
-}
-
-static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg)
-{
- return afu->p2n_mmio + cxl_reg_off(reg);
-}
-
-static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
- out_be64(_cxl_p2n_addr(afu, reg), val);
-}
-
-static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
- return in_be64(_cxl_p2n_addr(afu, reg));
- else
- return ~0ULL;
-}
-
-static inline bool cxl_is_power8(void)
-{
- if ((pvr_version_is(PVR_POWER8E)) ||
- (pvr_version_is(PVR_POWER8NVL)) ||
- (pvr_version_is(PVR_POWER8)) ||
- (pvr_version_is(PVR_HX_C2000)))
- return true;
- return false;
-}
-
-static inline bool cxl_is_power9(void)
-{
- if (pvr_version_is(PVR_POWER9))
- return true;
- return false;
-}
-
-ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
- loff_t off, size_t count);
-
-
-struct cxl_calls {
- void (*cxl_slbia)(struct mm_struct *mm);
- struct module *owner;
-};
-int register_cxl_calls(struct cxl_calls *calls);
-void unregister_cxl_calls(struct cxl_calls *calls);
-int cxl_update_properties(struct device_node *dn, struct property *new_prop);
-
-void cxl_remove_adapter_nr(struct cxl *adapter);
-
-void cxl_release_spa(struct cxl_afu *afu);
-
-dev_t cxl_get_dev(void);
-int cxl_file_init(void);
-void cxl_file_exit(void);
-int cxl_register_adapter(struct cxl *adapter);
-int cxl_register_afu(struct cxl_afu *afu);
-int cxl_chardev_d_afu_add(struct cxl_afu *afu);
-int cxl_chardev_m_afu_add(struct cxl_afu *afu);
-int cxl_chardev_s_afu_add(struct cxl_afu *afu);
-void cxl_chardev_afu_remove(struct cxl_afu *afu);
-
-void cxl_context_detach_all(struct cxl_afu *afu);
-void cxl_context_free(struct cxl_context *ctx);
-void cxl_context_detach(struct cxl_context *ctx);
-
-int cxl_sysfs_adapter_add(struct cxl *adapter);
-void cxl_sysfs_adapter_remove(struct cxl *adapter);
-int cxl_sysfs_afu_add(struct cxl_afu *afu);
-void cxl_sysfs_afu_remove(struct cxl_afu *afu);
-int cxl_sysfs_afu_m_add(struct cxl_afu *afu);
-void cxl_sysfs_afu_m_remove(struct cxl_afu *afu);
-
-struct cxl *cxl_alloc_adapter(void);
-struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice);
-int cxl_afu_select_best_mode(struct cxl_afu *afu);
-
-int cxl_native_register_psl_irq(struct cxl_afu *afu);
-void cxl_native_release_psl_irq(struct cxl_afu *afu);
-int cxl_native_register_psl_err_irq(struct cxl *adapter);
-void cxl_native_release_psl_err_irq(struct cxl *adapter);
-int cxl_native_register_serr_irq(struct cxl_afu *afu);
-void cxl_native_release_serr_irq(struct cxl_afu *afu);
-int afu_register_irqs(struct cxl_context *ctx, u32 count);
-void afu_release_irqs(struct cxl_context *ctx, void *cookie);
-void afu_irq_name_free(struct cxl_context *ctx);
-
-int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr);
-int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr);
-int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu);
-int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu);
-int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr);
-int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr);
-void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx);
-void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx);
-
-#ifdef CONFIG_DEBUG_FS
-
-void cxl_debugfs_init(void);
-void cxl_debugfs_exit(void);
-void cxl_debugfs_adapter_add(struct cxl *adapter);
-void cxl_debugfs_adapter_remove(struct cxl *adapter);
-void cxl_debugfs_afu_add(struct cxl_afu *afu);
-void cxl_debugfs_afu_remove(struct cxl_afu *afu);
-void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir);
-void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir);
-void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir);
-void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir);
-
-#else /* CONFIG_DEBUG_FS */
-
-static inline void __init cxl_debugfs_init(void)
-{
-}
-
-static inline void cxl_debugfs_exit(void)
-{
-}
-
-static inline void cxl_debugfs_adapter_add(struct cxl *adapter)
-{
-}
-
-static inline void cxl_debugfs_adapter_remove(struct cxl *adapter)
-{
-}
-
-static inline void cxl_debugfs_afu_add(struct cxl_afu *afu)
-{
-}
-
-static inline void cxl_debugfs_afu_remove(struct cxl_afu *afu)
-{
-}
-
-static inline void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter,
- struct dentry *dir)
-{
-}
-
-static inline void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter,
- struct dentry *dir)
-{
-}
-
-static inline void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir)
-{
-}
-
-static inline void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir)
-{
-}
-
-#endif /* CONFIG_DEBUG_FS */
-
-void cxl_handle_fault(struct work_struct *work);
-void cxl_prefault(struct cxl_context *ctx, u64 wed);
-int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar);
-
-struct cxl *get_cxl_adapter(int num);
-int cxl_alloc_sst(struct cxl_context *ctx);
-void cxl_dump_debug_buffer(void *addr, size_t size);
-
-void init_cxl_native(void);
-
-struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
-void cxl_context_set_mapping(struct cxl_context *ctx,
- struct address_space *mapping);
-void cxl_context_free(struct cxl_context *ctx);
-int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
-unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
- irq_handler_t handler, void *cookie, const char *name);
-void cxl_unmap_irq(unsigned int virq, void *cookie);
-int __detach_context(struct cxl_context *ctx);
-
-/*
- * This must match the layout of the H_COLLECT_CA_INT_INFO retbuf defined
- * in PAPR.
- * Field pid_tid is now 'reserved' because it is no longer used on bare-metal.
- * In a guest environment, the PSL_PID_An register occupies the upper 32 bits
- * and the PSL_TID_An register the lower 32 bits.
- */
-struct cxl_irq_info {
- u64 dsisr;
- u64 dar;
- u64 dsr;
- u64 reserved;
- u64 afu_err;
- u64 errstat;
- u64 proc_handle;
- u64 padding[2]; /* to match the expected retbuf size for plpar_hcall9 */
-};
-
-void cxl_assign_psn_space(struct cxl_context *ctx);
-int cxl_invalidate_all_psl9(struct cxl *adapter);
-int cxl_invalidate_all_psl8(struct cxl *adapter);
-irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
-irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
-irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info);
-int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler,
- void *cookie, irq_hw_number_t *dest_hwirq,
- unsigned int *dest_virq, const char *name);
-
-int cxl_check_error(struct cxl_afu *afu);
-int cxl_afu_slbia(struct cxl_afu *afu);
-int cxl_data_cache_flush(struct cxl *adapter);
-int cxl_afu_disable(struct cxl_afu *afu);
-int cxl_psl_purge(struct cxl_afu *afu);
-int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
- u32 *phb_index, u64 *capp_unit_id);
-int cxl_slot_is_switched(struct pci_dev *dev);
-int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg);
-u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9);
-
-void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx);
-void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx);
-void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter);
-void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter);
-int cxl_pci_vphb_add(struct cxl_afu *afu);
-void cxl_pci_vphb_remove(struct cxl_afu *afu);
-void cxl_release_mapping(struct cxl_context *ctx);
-
-extern struct pci_driver cxl_pci_driver;
-extern struct platform_driver cxl_of_driver;
-int afu_allocate_irqs(struct cxl_context *ctx, u32 count);
-
-int afu_open(struct inode *inode, struct file *file);
-int afu_release(struct inode *inode, struct file *file);
-long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int afu_mmap(struct file *file, struct vm_area_struct *vm);
-__poll_t afu_poll(struct file *file, struct poll_table_struct *poll);
-ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
-extern const struct file_operations afu_fops;
-
-struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *dev);
-void cxl_guest_remove_adapter(struct cxl *adapter);
-int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np);
-int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np);
-ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
-ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len);
-int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np);
-void cxl_guest_remove_afu(struct cxl_afu *afu);
-int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np);
-int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *afu_np);
-int cxl_guest_add_chardev(struct cxl *adapter);
-void cxl_guest_remove_chardev(struct cxl *adapter);
-void cxl_guest_reload_module(struct cxl *adapter);
-int cxl_of_probe(struct platform_device *pdev);
-
-struct cxl_backend_ops {
- struct module *module;
- int (*adapter_reset)(struct cxl *adapter);
- int (*alloc_one_irq)(struct cxl *adapter);
- void (*release_one_irq)(struct cxl *adapter, int hwirq);
- int (*alloc_irq_ranges)(struct cxl_irq_ranges *irqs,
- struct cxl *adapter, unsigned int num);
- void (*release_irq_ranges)(struct cxl_irq_ranges *irqs,
- struct cxl *adapter);
- int (*setup_irq)(struct cxl *adapter, unsigned int hwirq,
- unsigned int virq);
- irqreturn_t (*handle_psl_slice_error)(struct cxl_context *ctx,
- u64 dsisr, u64 errstat);
- irqreturn_t (*psl_interrupt)(int irq, void *data);
- int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
- void (*irq_wait)(struct cxl_context *ctx);
- int (*attach_process)(struct cxl_context *ctx, bool kernel,
- u64 wed, u64 amr);
- int (*detach_process)(struct cxl_context *ctx);
- void (*update_ivtes)(struct cxl_context *ctx);
- bool (*support_attributes)(const char *attr_name, enum cxl_attrs type);
- bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu);
- void (*release_afu)(struct device *dev);
- ssize_t (*afu_read_err_buffer)(struct cxl_afu *afu, char *buf,
- loff_t off, size_t count);
- int (*afu_check_and_enable)(struct cxl_afu *afu);
- int (*afu_activate_mode)(struct cxl_afu *afu, int mode);
- int (*afu_deactivate_mode)(struct cxl_afu *afu, int mode);
- int (*afu_reset)(struct cxl_afu *afu);
- int (*afu_cr_read8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 *val);
- int (*afu_cr_read16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 *val);
- int (*afu_cr_read32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 *val);
- int (*afu_cr_read64)(struct cxl_afu *afu, int cr_idx, u64 offset, u64 *val);
- int (*afu_cr_write8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 val);
- int (*afu_cr_write16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 val);
- int (*afu_cr_write32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 val);
- ssize_t (*read_adapter_vpd)(struct cxl *adapter, void *buf, size_t count);
-};
-extern const struct cxl_backend_ops cxl_native_ops;
-extern const struct cxl_backend_ops cxl_guest_ops;
-extern const struct cxl_backend_ops *cxl_ops;
-
-/* check if the given pci_dev is on the cxl vphb bus */
-bool cxl_pci_is_vphb_device(struct pci_dev *dev);
-
-/* decode AFU error bits in the PSL register PSL_SERR_An */
-void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
-
-/*
- * Increments the number of attached contexts on an adapter.
- * Returns -EBUSY if the adapter_context_lock is taken.
- */
-int cxl_adapter_context_get(struct cxl *adapter);
-
-/* Decrements the number of attached contexts on an adapter */
-void cxl_adapter_context_put(struct cxl *adapter);
-
-/* If no active contexts then prevents contexts from being attached */
-int cxl_adapter_context_lock(struct cxl *adapter);
-
-/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */
-void cxl_adapter_context_unlock(struct cxl *adapter);
-
-/* Increases the reference count to "struct mm_struct" */
-void cxl_context_mm_count_get(struct cxl_context *ctx);
-
-/* Decrements the reference count to "struct mm_struct" */
-void cxl_context_mm_count_put(struct cxl_context *ctx);
-
-#endif
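
A brief illustrative note on the guarded MMIO accessors above: cxl_p1_read(), cxl_p1n_read() and cxl_p2n_read() return ~0ULL whenever cxl_adapter_link_ok() reports the link as down, so callers were expected to treat an all-ones value as "link dead" rather than as real register contents. The sketch below shows that calling pattern; the helper function itself is hypothetical and was not part of the deleted header.

/* Hypothetical caller: sample a per-context register, treating all-ones as a dead link */
static bool sample_dsisr(struct cxl_afu *afu, u64 *out)
{
	u64 val = cxl_p2n_read(afu, CXL_PSL_DSISR_An);

	if (val == ~0ULL)
		return false;	/* adapter link is down (or register reads all-ones); do not trust the value */
	*out = val;
	return true;
}
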
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
deleted file mode 100644
index e5fe0a171472..000000000000
--- a/drivers/misc/cxl/cxllib.c
+++ /dev/null
@@ -1,271 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2017 IBM Corp.
- */
-
-#include <linux/hugetlb.h>
-#include <linux/sched/mm.h>
-#include <asm/opal-api.h>
-#include <asm/pnv-pci.h>
-#include <misc/cxllib.h>
-
-#include "cxl.h"
-
-#define CXL_INVALID_DRA ~0ull
-#define CXL_DUMMY_READ_SIZE 128
-#define CXL_DUMMY_READ_ALIGN 8
-#define CXL_CAPI_WINDOW_START 0x2000000000000ull
-#define CXL_CAPI_WINDOW_LOG_SIZE 48
-#define CXL_XSL_CONFIG_CURRENT_VERSION CXL_XSL_CONFIG_VERSION1
-
-
-bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags)
-{
- int rc;
- u32 phb_index;
- u64 chip_id, capp_unit_id;
-
- /* No flags currently supported */
- if (flags)
- return false;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return false;
-
- if (!cxl_is_power9())
- return false;
-
- if (cxl_slot_is_switched(dev))
- return false;
-
- /* on p9, some pci slots are not connected to a CAPP unit */
- rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
- if (rc)
- return false;
-
- return true;
-}
-EXPORT_SYMBOL_GPL(cxllib_slot_is_supported);
-
-static DEFINE_MUTEX(dra_mutex);
-static u64 dummy_read_addr = CXL_INVALID_DRA;
-
-static int allocate_dummy_read_buf(void)
-{
- u64 buf, vaddr;
- size_t buf_size;
-
- /*
-	 * The dummy read buffer is 128 bytes long, aligned on a
-	 * 256-byte boundary, and we need its physical address.
- */
- buf_size = CXL_DUMMY_READ_SIZE + (1ull << CXL_DUMMY_READ_ALIGN);
- buf = (u64) kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- vaddr = (buf + (1ull << CXL_DUMMY_READ_ALIGN) - 1) &
- (~0ull << CXL_DUMMY_READ_ALIGN);
-
- WARN((vaddr + CXL_DUMMY_READ_SIZE) > (buf + buf_size),
- "Dummy read buffer alignment issue");
- dummy_read_addr = virt_to_phys((void *) vaddr);
- return 0;
-}
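/*
 * Worked example of the align-up above (illustrative numbers, not from the
 * driver): with CXL_DUMMY_READ_ALIGN = 8, i.e. a 256-byte boundary, a
 * kzalloc() return of 0x1010 gives vaddr = (0x1010 + 0xff) & ~0xffULL =
 * 0x1100, and 0x1100 + 128 still fits inside the 0x180-byte (128 + 256)
 * allocation.
 */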
-
-int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg)
-{
- int rc;
- u32 phb_index;
- u64 chip_id, capp_unit_id;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return -EINVAL;
-
- mutex_lock(&dra_mutex);
- if (dummy_read_addr == CXL_INVALID_DRA) {
- rc = allocate_dummy_read_buf();
- if (rc) {
- mutex_unlock(&dra_mutex);
- return rc;
- }
- }
- mutex_unlock(&dra_mutex);
-
- rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
- if (rc)
- return rc;
-
- rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &cfg->dsnctl);
- if (rc)
- return rc;
-
- cfg->version = CXL_XSL_CONFIG_CURRENT_VERSION;
- cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE;
- cfg->bar_addr = CXL_CAPI_WINDOW_START;
- cfg->dra = dummy_read_addr;
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxllib_get_xsl_config);
-
-int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode,
- unsigned long flags)
-{
- int rc = 0;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return -EINVAL;
-
- switch (mode) {
- case CXL_MODE_PCI:
- /*
-		 * We currently don't support going back to PCI mode.
-		 * However, we'll turn the invalidations off, so that
-		 * the firmware doesn't have to ack them and can do
-		 * things like reset, etc. with no worries.
-		 * So always return EPERM (can't go back to PCI) or
-		 * EBUSY if we couldn't even turn off snooping.
- */
- rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_OFF);
- if (rc)
- rc = -EBUSY;
- else
- rc = -EPERM;
- break;
- case CXL_MODE_CXL:
- /* DMA only supported on TVT1 for the time being */
- if (flags != CXL_MODE_DMA_TVT1)
- return -EINVAL;
- rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_DMA_TVT1);
- if (rc)
- return rc;
- rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON);
- break;
- default:
- rc = -EINVAL;
- }
- return rc;
-}
-EXPORT_SYMBOL_GPL(cxllib_switch_phb_mode);
-
-/*
- * When switching the PHB to capi mode, the TVT#1 entry for
- * the Partitionable Endpoint is set in bypass mode, like
- * in PCI mode.
- * Configure the device dma to use TVT#1, which is done
- * by calling dma_set_mask() with a mask large enough.
- */
-int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags)
-{
- int rc;
-
- if (flags)
- return -EINVAL;
-
- rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
- return rc;
-}
-EXPORT_SYMBOL_GPL(cxllib_set_device_dma);
-
-int cxllib_get_PE_attributes(struct task_struct *task,
- unsigned long translation_mode,
- struct cxllib_pe_attributes *attr)
-{
- if (translation_mode != CXL_TRANSLATED_MODE &&
- translation_mode != CXL_REAL_MODE)
- return -EINVAL;
-
- attr->sr = cxl_calculate_sr(false,
- task == NULL,
- translation_mode == CXL_REAL_MODE,
- true);
- attr->lpid = mfspr(SPRN_LPID);
- if (task) {
- struct mm_struct *mm = get_task_mm(task);
- if (mm == NULL)
- return -EINVAL;
- /*
- * Caller is keeping a reference on mm_users for as long
- * as XSL uses the memory context
- */
- attr->pid = mm->context.id;
- mmput(mm);
- attr->tid = task->thread.tidr;
- } else {
- attr->pid = 0;
- attr->tid = 0;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes);
-
-static int get_vma_info(struct mm_struct *mm, u64 addr,
- u64 *vma_start, u64 *vma_end,
- unsigned long *page_size)
-{
- struct vm_area_struct *vma = NULL;
- int rc = 0;
-
- mmap_read_lock(mm);
-
- vma = find_vma(mm, addr);
- if (!vma) {
- rc = -EFAULT;
- goto out;
- }
- *page_size = vma_kernel_pagesize(vma);
- *vma_start = vma->vm_start;
- *vma_end = vma->vm_end;
-out:
- mmap_read_unlock(mm);
- return rc;
-}
-
-int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
-{
- int rc;
- u64 dar, vma_start, vma_end;
- unsigned long page_size;
-
- if (mm == NULL)
- return -EFAULT;
-
- /*
- * The buffer we have to process can extend over several pages
- * and may also cover several VMAs.
- * We iterate over all the pages. The page size could vary
- * between VMAs.
- */
- rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size);
- if (rc)
- return rc;
-
- for (dar = (addr & ~(page_size - 1)); dar < (addr + size);
- dar += page_size) {
- if (dar < vma_start || dar >= vma_end) {
- /*
- * We don't hold mm->mmap_lock while iterating, since
- * the lock is required by one of the lower-level page
- * fault processing functions and it could
- * create a deadlock.
- *
- * It means the VMAs can be altered between 2
- * loop iterations and we could theoretically
- * miss a page (however unlikely). But that's
- * not really a problem, as the driver will
- * retry access, get another page fault on the
- * missing page and call us again.
- */
- rc = get_vma_info(mm, dar, &vma_start, &vma_end,
- &page_size);
- if (rc)
- return rc;
- }
-
- rc = cxl_handle_mm_fault(mm, flags, dar);
- if (rc)
- return -EFAULT;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cxllib_handle_fault);
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
deleted file mode 100644
index 7b987bf498b5..000000000000
--- a/drivers/misc/cxl/debugfs.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include "cxl.h"
-
-static struct dentry *cxl_debugfs;
-
-/* Helpers to export CXL memory-mapped I/O registers via debugfs */
-static int debugfs_io_u64_get(void *data, u64 *val)
-{
- *val = in_be64((u64 __iomem *)data);
- return 0;
-}
-
-static int debugfs_io_u64_set(void *data, u64 val)
-{
- out_be64((u64 __iomem *)data, val);
- return 0;
-}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set,
- "0x%016llx\n");
-
-static void debugfs_create_io_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 __iomem *value)
-{
- debugfs_create_file_unsafe(name, mode, parent, (void __force *)value,
- &fops_io_x64);
-}
-
-void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir)
-{
- debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR1));
- debugfs_create_io_x64("fir_mask", 0400, dir,
- _cxl_p1_addr(adapter, CXL_PSL9_FIR_MASK));
- debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR_CNTL));
- debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_TRACECFG));
- debugfs_create_io_x64("debug", 0600, dir,
- _cxl_p1_addr(adapter, CXL_PSL9_DEBUG));
- debugfs_create_io_x64("xsl-debug", 0600, dir,
- _cxl_p1_addr(adapter, CXL_XSL9_DBG));
-}
-
-void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir)
-{
- debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
- debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2));
- debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL));
- debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));
-}
-
-void cxl_debugfs_adapter_add(struct cxl *adapter)
-{
- struct dentry *dir;
- char buf[32];
-
- if (!cxl_debugfs)
- return;
-
- snprintf(buf, 32, "card%i", adapter->adapter_num);
- dir = debugfs_create_dir(buf, cxl_debugfs);
- adapter->debugfs = dir;
-
- debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE));
-
- if (adapter->native->sl_ops->debugfs_add_adapter_regs)
- adapter->native->sl_ops->debugfs_add_adapter_regs(adapter, dir);
-}
-
-void cxl_debugfs_adapter_remove(struct cxl *adapter)
-{
- debugfs_remove_recursive(adapter->debugfs);
-}
-
-void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir)
-{
- debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
-}
-
-void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir)
-{
- debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An));
- debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An));
-
- debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An));
- debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
- debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An));
- debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE));
-}
-
-void cxl_debugfs_afu_add(struct cxl_afu *afu)
-{
- struct dentry *dir;
- char buf[32];
-
- if (!afu->adapter->debugfs)
- return;
-
- snprintf(buf, 32, "psl%i.%i", afu->adapter->adapter_num, afu->slice);
- dir = debugfs_create_dir(buf, afu->adapter->debugfs);
- afu->debugfs = dir;
-
- debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An));
- debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An));
- debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An));
-
- debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An));
-
- if (afu->adapter->native->sl_ops->debugfs_add_afu_regs)
- afu->adapter->native->sl_ops->debugfs_add_afu_regs(afu, dir);
-}
-
-void cxl_debugfs_afu_remove(struct cxl_afu *afu)
-{
- debugfs_remove_recursive(afu->debugfs);
-}
-
-void __init cxl_debugfs_init(void)
-{
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return;
-
- cxl_debugfs = debugfs_create_dir("cxl", NULL);
-}
-
-void cxl_debugfs_exit(void)
-{
- debugfs_remove_recursive(cxl_debugfs);
-}
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
deleted file mode 100644
index 2c64f55cf01f..000000000000
--- a/drivers/misc/cxl/fault.c
+++ /dev/null
@@ -1,341 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/workqueue.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/mm.h>
-#include <linux/pid.h>
-#include <linux/mm.h>
-#include <linux/moduleparam.h>
-
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "cxl" "."
-#include <asm/current.h>
-#include <asm/copro.h>
-#include <asm/mmu.h>
-
-#include "cxl.h"
-#include "trace.h"
-
-static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
-{
- return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
- (sste->esid_data == cpu_to_be64(slb->esid)));
-}
-
-/*
- * This finds a free SSTE for the given SLB, or returns NULL if it's already in
- * the segment table.
- */
-static struct cxl_sste *find_free_sste(struct cxl_context *ctx,
- struct copro_slb *slb)
-{
- struct cxl_sste *primary, *sste, *ret = NULL;
- unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
- unsigned int entry;
- unsigned int hash;
-
- if (slb->vsid & SLB_VSID_B_1T)
- hash = (slb->esid >> SID_SHIFT_1T) & mask;
- else /* 256M */
- hash = (slb->esid >> SID_SHIFT) & mask;
-
- primary = ctx->sstp + (hash << 3);
-
- for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
- if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
- ret = sste;
- if (sste_matches(sste, slb))
- return NULL;
- }
- if (ret)
- return ret;
-
- /* Nothing free, select an entry to cast out */
- ret = primary + ctx->sst_lru;
- ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;
-
- return ret;
-}
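/*
 * Illustrative sizing for the hash above (assumed numbers, not from the
 * driver): struct cxl_sste is 16 bytes, so an sst_size of 4096 gives
 * mask = (4096 >> 7) - 1 = 31, i.e. 32 groups of 8 entries (hence the
 * hash << 3 and the 8-entry scan), which is exactly 32 * 8 * 16 = 4096 bytes.
 */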
-
-static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
-{
-	/* The mask is the group index; we search the primary and secondary here. */
- struct cxl_sste *sste;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->sste_lock, flags);
- sste = find_free_sste(ctx, slb);
- if (!sste)
- goto out_unlock;
-
- pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
- sste - ctx->sstp, slb->vsid, slb->esid);
- trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid);
-
- sste->vsid_data = cpu_to_be64(slb->vsid);
- sste->esid_data = cpu_to_be64(slb->esid);
-out_unlock:
- spin_unlock_irqrestore(&ctx->sste_lock, flags);
-}
-
-static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
- u64 ea)
-{
- struct copro_slb slb = {0,0};
- int rc;
-
- if (!(rc = copro_calculate_slb(mm, ea, &slb))) {
- cxl_load_segment(ctx, &slb);
- }
-
- return rc;
-}
-
-static void cxl_ack_ae(struct cxl_context *ctx)
-{
- unsigned long flags;
-
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
-
- spin_lock_irqsave(&ctx->lock, flags);
- ctx->pending_fault = true;
- ctx->fault_addr = ctx->dar;
- ctx->fault_dsisr = ctx->dsisr;
- spin_unlock_irqrestore(&ctx->lock, flags);
-
- wake_up_all(&ctx->wq);
-}
-
-static int cxl_handle_segment_miss(struct cxl_context *ctx,
- struct mm_struct *mm, u64 ea)
-{
- int rc;
-
- pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);
- trace_cxl_ste_miss(ctx, ea);
-
- if ((rc = cxl_fault_segment(ctx, mm, ea)))
- cxl_ack_ae(ctx);
- else {
-
- mb(); /* Order seg table write to TFC MMIO write */
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
- }
-
- return IRQ_HANDLED;
-}
-
-int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
-{
- vm_fault_t flt = 0;
- int result;
- unsigned long access, flags, inv_flags = 0;
-
- /*
-	 * Add the fault-handling CPU to the task's mm cpumask so that we
-	 * can do a safe lockless page table walk when inserting the
-	 * hash page table entry. This function gets called with a
- * valid mm for user space addresses. Hence using the if (mm)
- * check is sufficient here.
- */
- if (mm && !cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- /*
- * We need to make sure we walk the table only after
- * we update the cpumask. The other side of the barrier
- * is explained in serialize_against_pte_lookup()
- */
- smp_mb();
- }
- if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
- pr_devel("copro_handle_mm_fault failed: %#x\n", result);
- return result;
- }
-
- if (!radix_enabled()) {
- /*
- * update_mmu_cache() will not have loaded the hash since current->trap
- * is not a 0x400 or 0x300, so just call hash_page_mm() here.
- */
- access = _PAGE_PRESENT | _PAGE_READ;
- if (dsisr & CXL_PSL_DSISR_An_S)
- access |= _PAGE_WRITE;
-
- if (!mm && (get_region_id(dar) != USER_REGION_ID))
- access |= _PAGE_PRIVILEGED;
-
- if (dsisr & DSISR_NOHPTE)
- inv_flags |= HPTE_NOHPTE_UPDATE;
-
- local_irq_save(flags);
- hash_page_mm(mm, dar, access, 0x300, inv_flags);
- local_irq_restore(flags);
- }
- return 0;
-}
-
-static void cxl_handle_page_fault(struct cxl_context *ctx,
- struct mm_struct *mm,
- u64 dsisr, u64 dar)
-{
- trace_cxl_pte_miss(ctx, dsisr, dar);
-
- if (cxl_handle_mm_fault(mm, dsisr, dar)) {
- cxl_ack_ae(ctx);
- } else {
- pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
- }
-}
-
-/*
- * Returns the mm_struct corresponding to the context ctx.
- * If mm_users == 0, the context may be in the process of being closed.
- */
-static struct mm_struct *get_mem_context(struct cxl_context *ctx)
-{
- if (ctx->mm == NULL)
- return NULL;
-
- if (!mmget_not_zero(ctx->mm))
- return NULL;
-
- return ctx->mm;
-}
-
-static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
-{
- if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS)))
- return true;
-
- return false;
-}
-
-static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
-{
- if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
- return true;
-
- if (cxl_is_power9())
- return true;
-
- return false;
-}
-
-void cxl_handle_fault(struct work_struct *fault_work)
-{
- struct cxl_context *ctx =
- container_of(fault_work, struct cxl_context, fault_work);
- u64 dsisr = ctx->dsisr;
- u64 dar = ctx->dar;
- struct mm_struct *mm = NULL;
-
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
- cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
- cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
- /* Most likely explanation is harmless - a dedicated
- * process has detached and these were cleared by the
- * PSL purge, but warn about it just in case
- */
- dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
- return;
- }
- }
-
- /* Early return if the context is being / has been detached */
- if (ctx->status == CLOSED) {
- cxl_ack_ae(ctx);
- return;
- }
-
- pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
- "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
-
- if (!ctx->kernel) {
-
- mm = get_mem_context(ctx);
- if (mm == NULL) {
- pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
- __func__, ctx->pe, pid_nr(ctx->pid));
- cxl_ack_ae(ctx);
- return;
- } else {
- pr_devel("Handling page fault for pe=%d pid=%i\n",
- ctx->pe, pid_nr(ctx->pid));
- }
- }
-
- if (cxl_is_segment_miss(ctx, dsisr))
- cxl_handle_segment_miss(ctx, mm, dar);
- else if (cxl_is_page_fault(ctx, dsisr))
- cxl_handle_page_fault(ctx, mm, dsisr, dar);
- else
- WARN(1, "cxl_handle_fault has nothing to handle\n");
-
- if (mm)
- mmput(mm);
-}
-
-static u64 next_segment(u64 ea, u64 vsid)
-{
- if (vsid & SLB_VSID_B_1T)
- ea |= (1ULL << 40) - 1;
- else
- ea |= (1ULL << 28) - 1;
-
- return ea + 1;
-}
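/*
 * Worked example (illustrative values): in a 256 MB segment, ea = 0x12345678
 * ORed with 0x0fffffff gives 0x1fffffff, and adding 1 yields 0x20000000,
 * the base of the next 256 MB segment. For 1 TB segments the mask is
 * (1ULL << 40) - 1 instead.
 */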
-
-static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm)
-{
- u64 ea, last_esid = 0;
- struct copro_slb slb;
- VMA_ITERATOR(vmi, mm, 0);
- struct vm_area_struct *vma;
- int rc;
-
- mmap_read_lock(mm);
- for_each_vma(vmi, vma) {
- for (ea = vma->vm_start; ea < vma->vm_end;
- ea = next_segment(ea, slb.vsid)) {
- rc = copro_calculate_slb(mm, ea, &slb);
- if (rc)
- continue;
-
- if (last_esid == slb.esid)
- continue;
-
- cxl_load_segment(ctx, &slb);
- last_esid = slb.esid;
- }
- }
- mmap_read_unlock(mm);
-}
-
-void cxl_prefault(struct cxl_context *ctx, u64 wed)
-{
- struct mm_struct *mm = get_mem_context(ctx);
-
- if (mm == NULL) {
- pr_devel("cxl_prefault unable to get mm %i\n",
- pid_nr(ctx->pid));
- return;
- }
-
- switch (ctx->afu->prefault_mode) {
- case CXL_PREFAULT_WED:
- cxl_fault_segment(ctx, mm, wed);
- break;
- case CXL_PREFAULT_ALL:
- cxl_prefault_vma(ctx, mm);
- break;
- default:
- break;
- }
-
- mmput(mm);
-}
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
deleted file mode 100644
index 012e11b959bc..000000000000
--- a/drivers/misc/cxl/file.c
+++ /dev/null
@@ -1,699 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/bitmap.h>
-#include <linux/sched/signal.h>
-#include <linux/poll.h>
-#include <linux/pid.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/sched/mm.h>
-#include <linux/mmu_context.h>
-#include <asm/cputable.h>
-#include <asm/current.h>
-#include <asm/copro.h>
-
-#include "cxl.h"
-#include "trace.h"
-
-#define CXL_NUM_MINORS 256 /* Total to reserve */
-
-#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
-#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
-#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
-#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
-#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
-#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))
-
-#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)
-
-#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
-
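/*
 * Worked example of the minor-number layout (assuming adapter 1, AFU slice 2):
 * CXL_CARD_MINOR = 1 * 13 = 13, CXL_AFU_MINOR_D = 13 + 1 + 3 * 2 = 20,
 * CXL_AFU_MINOR_M = 21, CXL_AFU_MINOR_S = 22. Decoding: CXL_DEVT_ADAPTER =
 * 20 / 13 = 1, CXL_DEVT_AFU = (20 % 13 - 1) / 3 = 2, and minor 13 itself
 * satisfies CXL_DEVT_IS_CARD.
 */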
-static dev_t cxl_dev;
-
-static int __afu_open(struct inode *inode, struct file *file, bool master)
-{
- struct cxl *adapter;
- struct cxl_afu *afu;
- struct cxl_context *ctx;
- int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
- int slice = CXL_DEVT_AFU(inode->i_rdev);
- int rc = -ENODEV;
-
- pr_devel("afu_open afu%i.%i\n", slice, adapter_num);
-
- if (!(adapter = get_cxl_adapter(adapter_num)))
- return -ENODEV;
-
- if (slice > adapter->slices)
- goto err_put_adapter;
-
- spin_lock(&adapter->afu_list_lock);
- if (!(afu = adapter->afu[slice])) {
- spin_unlock(&adapter->afu_list_lock);
- goto err_put_adapter;
- }
-
- /*
-	 * Take a ref to the afu so that it doesn't go away
-	 * for the rest of the function. This ref is released before
- * we return.
- */
- cxl_afu_get(afu);
- spin_unlock(&adapter->afu_list_lock);
-
- if (!afu->current_mode)
- goto err_put_afu;
-
- if (!cxl_ops->link_ok(adapter, afu)) {
- rc = -EIO;
- goto err_put_afu;
- }
-
- if (!(ctx = cxl_context_alloc())) {
- rc = -ENOMEM;
- goto err_put_afu;
- }
-
- rc = cxl_context_init(ctx, afu, master);
- if (rc)
- goto err_put_afu;
-
- cxl_context_set_mapping(ctx, inode->i_mapping);
-
- pr_devel("afu_open pe: %i\n", ctx->pe);
- file->private_data = ctx;
-
- /* indicate success */
- rc = 0;
-
-err_put_afu:
- /* release the ref taken earlier */
- cxl_afu_put(afu);
-err_put_adapter:
- put_device(&adapter->dev);
- return rc;
-}
-
-int afu_open(struct inode *inode, struct file *file)
-{
- return __afu_open(inode, file, false);
-}
-
-static int afu_master_open(struct inode *inode, struct file *file)
-{
- return __afu_open(inode, file, true);
-}
-
-int afu_release(struct inode *inode, struct file *file)
-{
- struct cxl_context *ctx = file->private_data;
-
- pr_devel("%s: closing cxl file descriptor. pe: %i\n",
- __func__, ctx->pe);
- cxl_context_detach(ctx);
-
-
- /*
-	 * Delete the context's mapping pointer, unless it was created by the
- * kernel API, in which case leave it so it can be freed by reclaim_ctx()
- */
- if (!ctx->kernelapi) {
- mutex_lock(&ctx->mapping_lock);
- ctx->mapping = NULL;
- mutex_unlock(&ctx->mapping_lock);
- }
-
- /*
-	 * At this point all bottom halves have finished and we should be
- * getting no more IRQs from the hardware for this context. Once it's
- * removed from the IDR (and RCU synchronised) it's safe to free the
- * sstp and context.
- */
- cxl_context_free(ctx);
-
- return 0;
-}
-
-static long afu_ioctl_start_work(struct cxl_context *ctx,
- struct cxl_ioctl_start_work __user *uwork)
-{
- struct cxl_ioctl_start_work work;
- u64 amr = 0;
- int rc;
-
- pr_devel("%s: pe: %i\n", __func__, ctx->pe);
-
- /* Do this outside the status_mutex to avoid a circular dependency with
- * the locking in cxl_mmap_fault() */
- if (copy_from_user(&work, uwork, sizeof(work)))
- return -EFAULT;
-
- mutex_lock(&ctx->status_mutex);
- if (ctx->status != OPENED) {
- rc = -EIO;
- goto out;
- }
-
- /*
-	 * If any of the reserved fields are set or any of the unused
-	 * flags are set, it's invalid.
- */
- if (work.reserved1 || work.reserved2 || work.reserved3 ||
- work.reserved4 || work.reserved5 ||
- (work.flags & ~CXL_START_WORK_ALL)) {
- rc = -EINVAL;
- goto out;
- }
-
- if (!(work.flags & CXL_START_WORK_NUM_IRQS))
- work.num_interrupts = ctx->afu->pp_irqs;
- else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
- (work.num_interrupts > ctx->afu->irqs_max)) {
- rc = -EINVAL;
- goto out;
- }
-
- if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
- goto out;
-
- if (work.flags & CXL_START_WORK_AMR)
- amr = work.amr & mfspr(SPRN_UAMOR);
-
- if (work.flags & CXL_START_WORK_TID)
- ctx->assign_tidr = true;
-
- ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
-
- /*
- * Increment the mapped context count for adapter. This also checks
- * if adapter_context_lock is taken.
- */
- rc = cxl_adapter_context_get(ctx->afu->adapter);
- if (rc) {
- afu_release_irqs(ctx, ctx);
- goto out;
- }
-
- /*
- * We grab the PID here and not in the file open to allow for the case
-	 * where a process (master, some daemon, etc.) has opened the chardev on
- * behalf of another process, so the AFU's mm gets bound to the process
- * that performs this ioctl and not the process that opened the file.
- * Also we grab the PID of the group leader so that if the task that
-	 * has performed the attach operation exits, the mm context of the
- * process is still accessible.
- */
- ctx->pid = get_task_pid(current, PIDTYPE_PID);
-
- /* acquire a reference to the task's mm */
- ctx->mm = get_task_mm(current);
-
- /* ensure this mm_struct can't be freed */
- cxl_context_mm_count_get(ctx);
-
- if (ctx->mm) {
- /* decrement the use count from above */
- mmput(ctx->mm);
- /* make TLBIs for this context global */
- mm_context_add_copro(ctx->mm);
- }
-
- /*
- * Increment driver use count. Enables global TLBIs for hash
- * and callbacks to handle the segment table
- */
- cxl_ctx_get();
-
- /*
- * A barrier is needed to make sure all TLBIs are global
- * before we attach and the context starts being used by the
- * adapter.
- *
- * Needed after mm_context_add_copro() for radix and
- * cxl_ctx_get() for hash/p8.
- *
- * The barrier should really be mb(), since it involves a
- * device. However, it's only useful when we have local
-	 * vs. global TLBIs, i.e. SMP=y. So keep smp_mb().
- */
- smp_mb();
-
- trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
-
- if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
- amr))) {
- afu_release_irqs(ctx, ctx);
- cxl_adapter_context_put(ctx->afu->adapter);
- put_pid(ctx->pid);
- ctx->pid = NULL;
- cxl_ctx_put();
- cxl_context_mm_count_put(ctx);
- if (ctx->mm)
- mm_context_remove_copro(ctx->mm);
- goto out;
- }
-
- rc = 0;
- if (work.flags & CXL_START_WORK_TID) {
- work.tid = ctx->tidr;
- if (copy_to_user(uwork, &work, sizeof(work)))
- rc = -EFAULT;
- }
-
- ctx->status = STARTED;
-
-out:
- mutex_unlock(&ctx->status_mutex);
- return rc;
-}
-
-static long afu_ioctl_process_element(struct cxl_context *ctx,
- int __user *upe)
-{
- pr_devel("%s: pe: %i\n", __func__, ctx->pe);
-
- if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
- return -EFAULT;
-
- return 0;
-}
-
-static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
- struct cxl_afu_id __user *upafuid)
-{
- struct cxl_afu_id afuid = { 0 };
-
- afuid.card_id = ctx->afu->adapter->adapter_num;
- afuid.afu_offset = ctx->afu->slice;
- afuid.afu_mode = ctx->afu->current_mode;
-
- /* set the flag bit in case the afu is a slave */
- if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
- afuid.flags |= CXL_AFUID_FLAG_SLAVE;
-
- if (copy_to_user(upafuid, &afuid, sizeof(afuid)))
- return -EFAULT;
-
- return 0;
-}
-
-long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct cxl_context *ctx = file->private_data;
-
- if (ctx->status == CLOSED)
- return -EIO;
-
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- return -EIO;
-
- pr_devel("afu_ioctl\n");
- switch (cmd) {
- case CXL_IOCTL_START_WORK:
- return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
- case CXL_IOCTL_GET_PROCESS_ELEMENT:
- return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
- case CXL_IOCTL_GET_AFU_ID:
- return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
- arg);
- }
- return -EINVAL;
-}
-
-static long afu_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- return afu_ioctl(file, cmd, arg);
-}
-
-int afu_mmap(struct file *file, struct vm_area_struct *vm)
-{
- struct cxl_context *ctx = file->private_data;
-
- /* AFU must be started before we can MMIO */
- if (ctx->status != STARTED)
- return -EIO;
-
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- return -EIO;
-
- return cxl_context_iomap(ctx, vm);
-}
-
-static inline bool ctx_event_pending(struct cxl_context *ctx)
-{
- if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
- return true;
-
- if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
- return true;
-
- return false;
-}
-
-__poll_t afu_poll(struct file *file, struct poll_table_struct *poll)
-{
- struct cxl_context *ctx = file->private_data;
- __poll_t mask = 0;
- unsigned long flags;
-
-
- poll_wait(file, &ctx->wq, poll);
-
- pr_devel("afu_poll wait done pe: %i\n", ctx->pe);
-
- spin_lock_irqsave(&ctx->lock, flags);
- if (ctx_event_pending(ctx))
- mask |= EPOLLIN | EPOLLRDNORM;
- else if (ctx->status == CLOSED)
-		/* Only error on closed when there are no further events pending
- */
- mask |= EPOLLERR;
- spin_unlock_irqrestore(&ctx->lock, flags);
-
- pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);
-
- return mask;
-}
-
-static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
- char __user *buf,
- struct cxl_event *event,
- struct cxl_event_afu_driver_reserved *pl)
-{
- /* Check event */
- if (!pl) {
- ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
- return -EFAULT;
- }
-
- /* Check event size */
- event->header.size += pl->data_size;
- if (event->header.size > CXL_READ_MIN_SIZE) {
- ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
- return -EFAULT;
- }
-
- /* Copy event header */
- if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) {
- ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
- return -EFAULT;
- }
-
- /* Copy event data */
- buf += sizeof(struct cxl_event_header);
- if (copy_to_user(buf, &pl->data, pl->data_size)) {
- ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
- return -EFAULT;
- }
-
- ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
- return event->header.size;
-}
-
-ssize_t afu_read(struct file *file, char __user *buf, size_t count,
- loff_t *off)
-{
- struct cxl_context *ctx = file->private_data;
- struct cxl_event_afu_driver_reserved *pl = NULL;
- struct cxl_event event;
- unsigned long flags;
- int rc;
- DEFINE_WAIT(wait);
-
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- return -EIO;
-
- if (count < CXL_READ_MIN_SIZE)
- return -EINVAL;
-
- spin_lock_irqsave(&ctx->lock, flags);
-
- for (;;) {
- prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
- if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
- break;
-
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
- rc = -EIO;
- goto out;
- }
-
- if (file->f_flags & O_NONBLOCK) {
- rc = -EAGAIN;
- goto out;
- }
-
- if (signal_pending(current)) {
- rc = -ERESTARTSYS;
- goto out;
- }
-
- spin_unlock_irqrestore(&ctx->lock, flags);
- pr_devel("afu_read going to sleep...\n");
- schedule();
- pr_devel("afu_read woken up\n");
- spin_lock_irqsave(&ctx->lock, flags);
- }
-
- finish_wait(&ctx->wq, &wait);
-
- memset(&event, 0, sizeof(event));
- event.header.process_element = ctx->pe;
- event.header.size = sizeof(struct cxl_event_header);
- if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
- pr_devel("afu_read delivering AFU driver specific event\n");
- pl = ctx->afu_driver_ops->fetch_event(ctx);
- atomic_dec(&ctx->afu_driver_events);
- event.header.type = CXL_EVENT_AFU_DRIVER;
- } else if (ctx->pending_irq) {
- pr_devel("afu_read delivering AFU interrupt\n");
- event.header.size += sizeof(struct cxl_event_afu_interrupt);
- event.header.type = CXL_EVENT_AFU_INTERRUPT;
- event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
- clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
- if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
- ctx->pending_irq = false;
- } else if (ctx->pending_fault) {
- pr_devel("afu_read delivering data storage fault\n");
- event.header.size += sizeof(struct cxl_event_data_storage);
- event.header.type = CXL_EVENT_DATA_STORAGE;
- event.fault.addr = ctx->fault_addr;
- event.fault.dsisr = ctx->fault_dsisr;
- ctx->pending_fault = false;
- } else if (ctx->pending_afu_err) {
- pr_devel("afu_read delivering afu error\n");
- event.header.size += sizeof(struct cxl_event_afu_error);
- event.header.type = CXL_EVENT_AFU_ERROR;
- event.afu_error.error = ctx->afu_err;
- ctx->pending_afu_err = false;
- } else if (ctx->status == CLOSED) {
- pr_devel("afu_read fatal error\n");
- spin_unlock_irqrestore(&ctx->lock, flags);
- return -EIO;
- } else
- WARN(1, "afu_read must be buggy\n");
-
- spin_unlock_irqrestore(&ctx->lock, flags);
-
- if (event.header.type == CXL_EVENT_AFU_DRIVER)
- return afu_driver_event_copy(ctx, buf, &event, pl);
-
- if (copy_to_user(buf, &event, event.header.size))
- return -EFAULT;
- return event.header.size;
-
-out:
- finish_wait(&ctx->wq, &wait);
- spin_unlock_irqrestore(&ctx->lock, flags);
- return rc;
-}
-
-/*
- * Note: if this is updated, we need to update api.c to patch the new ones in
- * too
- */
-const struct file_operations afu_fops = {
- .owner = THIS_MODULE,
- .open = afu_open,
- .poll = afu_poll,
- .read = afu_read,
- .release = afu_release,
- .unlocked_ioctl = afu_ioctl,
- .compat_ioctl = afu_compat_ioctl,
- .mmap = afu_mmap,
-};
-
-static const struct file_operations afu_master_fops = {
- .owner = THIS_MODULE,
- .open = afu_master_open,
- .poll = afu_poll,
- .read = afu_read,
- .release = afu_release,
- .unlocked_ioctl = afu_ioctl,
- .compat_ioctl = afu_compat_ioctl,
- .mmap = afu_mmap,
-};
-
-
-static char *cxl_devnode(const struct device *dev, umode_t *mode)
-{
- if (cpu_has_feature(CPU_FTR_HVMODE) &&
- CXL_DEVT_IS_CARD(dev->devt)) {
- /*
- * These minor numbers will eventually be used to program the
- * PSL and AFUs once we have dynamic reprogramming support
- */
- return NULL;
- }
- return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
-}
-
-static const struct class cxl_class = {
- .name = "cxl",
- .devnode = cxl_devnode,
-};
-
-static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
- struct device **chardev, char *postfix, char *desc,
- const struct file_operations *fops)
-{
- struct device *dev;
- int rc;
-
- cdev_init(cdev, fops);
- rc = cdev_add(cdev, devt, 1);
- if (rc) {
- dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
- return rc;
- }
-
- dev = device_create(&cxl_class, &afu->dev, devt, afu,
- "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
- if (IS_ERR(dev)) {
- rc = PTR_ERR(dev);
- dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
- goto err;
- }
-
- *chardev = dev;
-
- return 0;
-err:
- cdev_del(cdev);
- return rc;
-}
-
-int cxl_chardev_d_afu_add(struct cxl_afu *afu)
-{
- return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
- &afu->chardev_d, "d", "dedicated",
- &afu_master_fops); /* Uses master fops */
-}
-
-int cxl_chardev_m_afu_add(struct cxl_afu *afu)
-{
- return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
- &afu->chardev_m, "m", "master",
- &afu_master_fops);
-}
-
-int cxl_chardev_s_afu_add(struct cxl_afu *afu)
-{
- return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
- &afu->chardev_s, "s", "shared",
- &afu_fops);
-}
-
-void cxl_chardev_afu_remove(struct cxl_afu *afu)
-{
- if (afu->chardev_d) {
- cdev_del(&afu->afu_cdev_d);
- device_unregister(afu->chardev_d);
- afu->chardev_d = NULL;
- }
- if (afu->chardev_m) {
- cdev_del(&afu->afu_cdev_m);
- device_unregister(afu->chardev_m);
- afu->chardev_m = NULL;
- }
- if (afu->chardev_s) {
- cdev_del(&afu->afu_cdev_s);
- device_unregister(afu->chardev_s);
- afu->chardev_s = NULL;
- }
-}
-
-int cxl_register_afu(struct cxl_afu *afu)
-{
- afu->dev.class = &cxl_class;
-
- return device_register(&afu->dev);
-}
-
-int cxl_register_adapter(struct cxl *adapter)
-{
- adapter->dev.class = &cxl_class;
-
- /*
- * Future: When we support dynamically reprogramming the PSL & AFU we
- * will expose the interface to do that via a chardev:
- * adapter->dev.devt = CXL_CARD_MKDEV(adapter);
- */
-
- return device_register(&adapter->dev);
-}
-
-dev_t cxl_get_dev(void)
-{
- return cxl_dev;
-}
-
-int __init cxl_file_init(void)
-{
- int rc;
-
- /*
-	 * If these change we really need to update the API: either change some
-	 * flags or bump the API version number, CXL_API_VERSION.
- */
- BUILD_BUG_ON(CXL_API_VERSION != 3);
- BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
- BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
- BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
- BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
- BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);
-
- if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
- pr_err("Unable to allocate CXL major number: %i\n", rc);
- return rc;
- }
-
- pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));
-
- rc = class_register(&cxl_class);
- if (rc) {
- pr_err("Unable to create CXL class\n");
- goto err;
- }
-
- return 0;
-
-err:
- unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
- return rc;
-}
-
-void cxl_file_exit(void)
-{
- unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
- class_unregister(&cxl_class);
-}
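
Editor's note (not part of the patch): the file.c removal above takes the cxl character-device plumbing with it. For readers less familiar with that plumbing, here is a minimal, hypothetical sketch of the same registration pattern it used: a chrdev region, a statically defined struct class registered with class_register(), and a cdev exposed via device_create(). The "demo" names are invented; error unwinding mirrors cxl_file_init()/cxl_add_chardev().

/* Illustrative sketch only -- not from this patch. */
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>

static dev_t demo_devt;
static struct cdev demo_cdev;

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static const struct class demo_class = {
	.name = "demo",
};

static int __init demo_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&demo_devt, 0, 1, "demo");
	if (rc)
		return rc;

	rc = class_register(&demo_class);
	if (rc)
		goto err_region;

	cdev_init(&demo_cdev, &demo_fops);
	rc = cdev_add(&demo_cdev, demo_devt, 1);
	if (rc)
		goto err_class;

	/* Creates /dev/demo0; a devnode callback could rename or hide it. */
	if (IS_ERR(device_create(&demo_class, NULL, demo_devt, NULL, "demo0"))) {
		rc = -ENODEV;
		goto err_cdev;
	}
	return 0;

err_cdev:
	cdev_del(&demo_cdev);
err_class:
	class_unregister(&demo_class);
err_region:
	unregister_chrdev_region(demo_devt, 1);
	return rc;
}

static void __exit demo_exit(void)
{
	device_destroy(&demo_class, demo_devt);
	cdev_del(&demo_cdev);
	class_unregister(&demo_class);
	unregister_chrdev_region(demo_devt, 1);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("chardev registration sketch");
MODULE_LICENSE("GPL");
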
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
deleted file mode 100644
index eee9decc121e..000000000000
--- a/drivers/misc/cxl/flash.c
+++ /dev/null
@@ -1,538 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/semaphore.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/of.h>
-#include <asm/rtas.h>
-
-#include "cxl.h"
-#include "hcalls.h"
-
-#define DOWNLOAD_IMAGE 1
-#define VALIDATE_IMAGE 2
-
-struct ai_header {
- u16 version;
- u8 reserved0[6];
- u16 vendor;
- u16 device;
- u16 subsystem_vendor;
- u16 subsystem;
- u64 image_offset;
- u64 image_length;
- u8 reserved1[96];
-};
-
-static struct semaphore sem;
-static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
-static struct sg_list *le;
-static u64 continue_token;
-static unsigned int transfer;
-
-struct update_props_workarea {
- __be32 phandle;
- __be32 state;
- __be64 reserved;
- __be32 nprops;
-} __packed;
-
-struct update_nodes_workarea {
- __be32 state;
- __be64 unit_address;
- __be32 reserved;
-} __packed;
-
-#define DEVICE_SCOPE 3
-#define NODE_ACTION_MASK 0xff000000
-#define NODE_COUNT_MASK 0x00ffffff
-#define OPCODE_DELETE 0x01000000
-#define OPCODE_UPDATE 0x02000000
-#define OPCODE_ADD 0x03000000
-
-static int rcall(int token, char *buf, s32 scope)
-{
- int rc;
-
- spin_lock(&rtas_data_buf_lock);
-
- memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
- rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
- memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
-
- spin_unlock(&rtas_data_buf_lock);
- return rc;
-}
-
-static int update_property(struct device_node *dn, const char *name,
- u32 vd, char *value)
-{
- struct property *new_prop;
- u32 *val;
- int rc;
-
- new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
- if (!new_prop)
- return -ENOMEM;
-
- new_prop->name = kstrdup(name, GFP_KERNEL);
- if (!new_prop->name) {
- kfree(new_prop);
- return -ENOMEM;
- }
-
- new_prop->length = vd;
- new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
- if (!new_prop->value) {
- kfree(new_prop->name);
- kfree(new_prop);
- return -ENOMEM;
- }
- memcpy(new_prop->value, value, vd);
-
- val = (u32 *)new_prop->value;
- rc = cxl_update_properties(dn, new_prop);
- pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n",
- dn, name, vd, be32_to_cpu(*val));
-
- if (rc) {
- kfree(new_prop->name);
- kfree(new_prop->value);
- kfree(new_prop);
- }
- return rc;
-}
-
-static int update_node(__be32 phandle, s32 scope)
-{
- struct update_props_workarea *upwa;
- struct device_node *dn;
- int i, rc, ret;
- char *prop_data;
- char *buf;
- int token;
- u32 nprops;
- u32 vd;
-
- token = rtas_token("ibm,update-properties");
- if (token == RTAS_UNKNOWN_SERVICE)
- return -EINVAL;
-
- buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- dn = of_find_node_by_phandle(be32_to_cpu(phandle));
- if (!dn) {
- kfree(buf);
- return -ENOENT;
- }
-
- upwa = (struct update_props_workarea *)&buf[0];
- upwa->phandle = phandle;
- do {
- rc = rcall(token, buf, scope);
- if (rc < 0)
- break;
-
- prop_data = buf + sizeof(*upwa);
- nprops = be32_to_cpu(upwa->nprops);
-
- if (*prop_data == 0) {
- prop_data++;
- vd = be32_to_cpu(*(__be32 *)prop_data);
- prop_data += vd + sizeof(vd);
- nprops--;
- }
-
- for (i = 0; i < nprops; i++) {
- char *prop_name;
-
- prop_name = prop_data;
- prop_data += strlen(prop_name) + 1;
- vd = be32_to_cpu(*(__be32 *)prop_data);
- prop_data += sizeof(vd);
-
- if ((vd != 0x00000000) && (vd != 0x80000000)) {
- ret = update_property(dn, prop_name, vd,
- prop_data);
- if (ret)
- pr_err("cxl: Could not update property %s - %i\n",
- prop_name, ret);
-
- prop_data += vd;
- }
- }
- } while (rc == 1);
-
- of_node_put(dn);
- kfree(buf);
- return rc;
-}
-
-static int update_devicetree(struct cxl *adapter, s32 scope)
-{
- struct update_nodes_workarea *unwa;
- u32 action, node_count;
- int token, rc, i;
- __be32 *data, phandle;
- char *buf;
-
- token = rtas_token("ibm,update-nodes");
- if (token == RTAS_UNKNOWN_SERVICE)
- return -EINVAL;
-
- buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- unwa = (struct update_nodes_workarea *)&buf[0];
- unwa->unit_address = cpu_to_be64(adapter->guest->handle);
- do {
- rc = rcall(token, buf, scope);
- if (rc && rc != 1)
- break;
-
- data = (__be32 *)buf + 4;
- while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
- action = be32_to_cpu(*data) & NODE_ACTION_MASK;
- node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
- pr_devel("device reconfiguration - action: %#x, nodes: %#x\n",
- action, node_count);
- data++;
-
- for (i = 0; i < node_count; i++) {
- phandle = *data++;
-
- switch (action) {
- case OPCODE_DELETE:
- /* nothing to do */
- break;
- case OPCODE_UPDATE:
- update_node(phandle, scope);
- break;
- case OPCODE_ADD:
- /* nothing to do, just move pointer */
- data++;
- break;
- }
- }
- }
- } while (rc == 1);
-
- kfree(buf);
- return 0;
-}
-
-static int handle_image(struct cxl *adapter, int operation,
- long (*fct)(u64, u64, u64, u64 *),
- struct cxl_adapter_image *ai)
-{
- size_t mod, s_copy, len_chunk = 0;
- struct ai_header *header = NULL;
- unsigned int entries = 0, i;
- void *dest, *from;
- int rc = 0, need_header;
-
- /* base adapter image header */
- need_header = (ai->flags & CXL_AI_NEED_HEADER);
- if (need_header) {
- header = kzalloc(sizeof(struct ai_header), GFP_KERNEL);
- if (!header)
- return -ENOMEM;
- header->version = cpu_to_be16(1);
- header->vendor = cpu_to_be16(adapter->guest->vendor);
- header->device = cpu_to_be16(adapter->guest->device);
- header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor);
- header->subsystem = cpu_to_be16(adapter->guest->subsystem);
- header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE);
- header->image_length = cpu_to_be64(ai->len_image);
- }
-
- /* number of entries in the list */
- len_chunk = ai->len_data;
- if (need_header)
- len_chunk += CXL_AI_HEADER_SIZE;
-
- entries = len_chunk / CXL_AI_BUFFER_SIZE;
- mod = len_chunk % CXL_AI_BUFFER_SIZE;
- if (mod)
- entries++;
-
- if (entries > CXL_AI_MAX_ENTRIES) {
- rc = -EINVAL;
- goto err;
- }
-
- /* < -- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes -->
- * chunk 0 ----------------------------------------------------
- * | header | data |
- * ----------------------------------------------------
- * chunk 1 ----------------------------------------------------
- * | data |
- * ----------------------------------------------------
- * ....
- * chunk n ----------------------------------------------------
- * | data |
- * ----------------------------------------------------
- */
- from = (void *) ai->data;
- for (i = 0; i < entries; i++) {
- dest = buffer[i];
- s_copy = CXL_AI_BUFFER_SIZE;
-
- if ((need_header) && (i == 0)) {
- /* add adapter image header */
- memcpy(buffer[i], header, sizeof(struct ai_header));
- s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE;
- dest += CXL_AI_HEADER_SIZE; /* image offset */
- }
- if ((i == (entries - 1)) && mod)
- s_copy = mod;
-
-		/* copy data */
-		if (copy_from_user(dest, from, s_copy)) {
-			rc = -EFAULT;
-			goto err;
-		}
-
- /* fill in the list */
- le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i]));
- le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE);
- if ((i == (entries - 1)) && mod)
- le[i].len = cpu_to_be64(mod);
- from += s_copy;
- }
- pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n",
- __func__, operation, need_header, entries, continue_token);
-
- /*
- * download/validate the adapter image to the coherent
- * platform facility
- */
- rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
- &continue_token);
- if (rc == 0) /* success of download/validation operation */
- continue_token = 0;
-
-err:
- kfree(header);
-
- return rc;
-}
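
Editor's note (not part of the patch): the chunk layout described in the comment inside handle_image() above comes down to simple arithmetic: the (optional header + data) length is split into 4 KiB buffers, and the remainder goes into the last scatter-list entry. A standalone illustration with hypothetical names, compilable with any C compiler:

/* Illustrative sketch only -- chunking arithmetic as used above. */
#include <stdio.h>
#include <stddef.h>

#define BUFFER_SIZE 4096u	/* stands in for CXL_AI_BUFFER_SIZE */

static unsigned int split_into_chunks(size_t len, size_t *last_len)
{
	unsigned int entries = len / BUFFER_SIZE;
	size_t mod = len % BUFFER_SIZE;

	if (mod)
		entries++;		/* partial tail buffer */
	*last_len = mod ? mod : BUFFER_SIZE;
	return entries;
}

int main(void)
{
	size_t last;
	unsigned int n = split_into_chunks(10000, &last);

	/* 10000 bytes -> 3 entries; the last entry carries 1808 bytes */
	printf("%u entries, last entry %zu bytes\n", n, last);
	return 0;
}
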
-
-static int transfer_image(struct cxl *adapter, int operation,
- struct cxl_adapter_image *ai)
-{
- int rc = 0;
- int afu;
-
- switch (operation) {
- case DOWNLOAD_IMAGE:
- rc = handle_image(adapter, operation,
- &cxl_h_download_adapter_image, ai);
- if (rc < 0) {
- pr_devel("resetting adapter\n");
- cxl_h_reset_adapter(adapter->guest->handle);
- }
- return rc;
-
- case VALIDATE_IMAGE:
- rc = handle_image(adapter, operation,
- &cxl_h_validate_adapter_image, ai);
- if (rc < 0) {
- pr_devel("resetting adapter\n");
- cxl_h_reset_adapter(adapter->guest->handle);
- return rc;
- }
- if (rc == 0) {
- pr_devel("remove current afu\n");
- for (afu = 0; afu < adapter->slices; afu++)
- cxl_guest_remove_afu(adapter->afu[afu]);
-
- pr_devel("resetting adapter\n");
- cxl_h_reset_adapter(adapter->guest->handle);
-
- /* The entire image has now been
- * downloaded and the validation has
- * been successfully performed.
- * After that, the partition should call
- * ibm,update-nodes and
- * ibm,update-properties to receive the
- * current configuration
- */
- rc = update_devicetree(adapter, DEVICE_SCOPE);
- transfer = 1;
- }
- return rc;
- }
-
- return -EINVAL;
-}
-
-static long ioctl_transfer_image(struct cxl *adapter, int operation,
- struct cxl_adapter_image __user *uai)
-{
- struct cxl_adapter_image ai;
-
- pr_devel("%s\n", __func__);
-
- if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image)))
- return -EFAULT;
-
- /*
- * Make sure reserved fields and bits are set to 0
- */
- if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 ||
- (ai.flags & ~CXL_AI_ALL))
- return -EINVAL;
-
- return transfer_image(adapter, operation, &ai);
-}
-
-static int device_open(struct inode *inode, struct file *file)
-{
- int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
- struct cxl *adapter;
- int rc = 0, i;
-
- pr_devel("in %s\n", __func__);
-
- BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE);
-
-	/* Allow only one process at a time to open the device, using a semaphore */
- if (down_interruptible(&sem) != 0)
- return -EPERM;
-
- if (!(adapter = get_cxl_adapter(adapter_num))) {
- rc = -ENODEV;
- goto err_unlock;
- }
-
- file->private_data = adapter;
- continue_token = 0;
- transfer = 0;
-
- for (i = 0; i < CXL_AI_MAX_ENTRIES; i++)
- buffer[i] = NULL;
-
-	/* aligned buffer containing list entries which describe up to
- * 1 megabyte of data (256 entries of 4096 bytes each)
- * Logical real address of buffer 0 - Buffer 0 length in bytes
- * Logical real address of buffer 1 - Buffer 1 length in bytes
- * Logical real address of buffer 2 - Buffer 2 length in bytes
- * ....
- * ....
- * Logical real address of buffer N - Buffer N length in bytes
- */
- le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
- if (!le) {
- rc = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
- buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
- if (!buffer[i]) {
- rc = -ENOMEM;
- goto err1;
- }
- }
-
- return 0;
-
-err1:
- for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
- if (buffer[i])
- free_page((unsigned long) buffer[i]);
- }
-
- if (le)
- free_page((unsigned long) le);
-err:
- put_device(&adapter->dev);
-err_unlock:
- up(&sem);
-
- return rc;
-}
-
-static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct cxl *adapter = file->private_data;
-
- pr_devel("in %s\n", __func__);
-
- if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE)
- return ioctl_transfer_image(adapter,
- DOWNLOAD_IMAGE,
- (struct cxl_adapter_image __user *)arg);
- else if (cmd == CXL_IOCTL_VALIDATE_IMAGE)
- return ioctl_transfer_image(adapter,
- VALIDATE_IMAGE,
- (struct cxl_adapter_image __user *)arg);
- else
- return -EINVAL;
-}
-
-static int device_close(struct inode *inode, struct file *file)
-{
- struct cxl *adapter = file->private_data;
- int i;
-
- pr_devel("in %s\n", __func__);
-
- for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
- if (buffer[i])
- free_page((unsigned long) buffer[i]);
- }
-
- if (le)
- free_page((unsigned long) le);
-
- up(&sem);
- put_device(&adapter->dev);
- continue_token = 0;
-
- /* reload the module */
- if (transfer)
- cxl_guest_reload_module(adapter);
- else {
- pr_devel("resetting adapter\n");
- cxl_h_reset_adapter(adapter->guest->handle);
- }
-
- transfer = 0;
- return 0;
-}
-
-static const struct file_operations fops = {
- .owner = THIS_MODULE,
- .open = device_open,
- .unlocked_ioctl = device_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
- .release = device_close,
-};
-
-void cxl_guest_remove_chardev(struct cxl *adapter)
-{
- cdev_del(&adapter->guest->cdev);
-}
-
-int cxl_guest_add_chardev(struct cxl *adapter)
-{
- dev_t devt;
- int rc;
-
- devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter));
- cdev_init(&adapter->guest->cdev, &fops);
- if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) {
- dev_err(&adapter->dev,
- "Unable to add chardev on adapter (card%i): %i\n",
- adapter->adapter_num, rc);
- goto err;
- }
- adapter->dev.devt = devt;
- sema_init(&sem, 1);
-err:
- return rc;
-}
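
Editor's note (not part of the patch): device_open() above serialises access with a semaphore that is only released in device_close(), so at most one process can drive a firmware download at a time. A minimal sketch of that gate, with hypothetical "demo" names:

/* Illustrative sketch only -- single-opener gate like the one flash.c used. */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/semaphore.h>

static struct semaphore demo_sem;

static int demo_open(struct inode *inode, struct file *file)
{
	/* down_interruptible() returns -EINTR if a signal arrives while waiting */
	if (down_interruptible(&demo_sem))
		return -ERESTARTSYS;
	return 0;
}

static int demo_release(struct inode *inode, struct file *file)
{
	up(&demo_sem);
	return 0;
}

static const struct file_operations demo_gate_fops = {
	.owner = THIS_MODULE,
	.open = demo_open,
	.release = demo_release,
};

static int __init demo_gate_init(void)
{
	sema_init(&demo_sem, 1);	/* one holder at a time */
	return 0;
}
module_init(demo_gate_init);
MODULE_LICENSE("GPL");

Note that the removed code returned -EPERM when the wait was interrupted; -ERESTARTSYS (or a mutex with trylock) is the more common convention, which the sketch follows.
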
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
deleted file mode 100644
index fb95a2d5cef4..000000000000
--- a/drivers/misc/cxl/guest.c
+++ /dev/null
@@ -1,1208 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-#include <linux/irqdomain.h>
-#include <linux/platform_device.h>
-
-#include "cxl.h"
-#include "hcalls.h"
-#include "trace.h"
-
-#define CXL_ERROR_DETECTED_EVENT 1
-#define CXL_SLOT_RESET_EVENT 2
-#define CXL_RESUME_EVENT 3
-
-static void pci_error_handlers(struct cxl_afu *afu,
- int bus_error_event,
- pci_channel_state_t state)
-{
- struct pci_dev *afu_dev;
- struct pci_driver *afu_drv;
- const struct pci_error_handlers *err_handler;
-
- if (afu->phb == NULL)
- return;
-
- list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- afu_drv = to_pci_driver(afu_dev->dev.driver);
- if (!afu_drv)
- continue;
-
- err_handler = afu_drv->err_handler;
- switch (bus_error_event) {
- case CXL_ERROR_DETECTED_EVENT:
- afu_dev->error_state = state;
-
- if (err_handler &&
- err_handler->error_detected)
- err_handler->error_detected(afu_dev, state);
- break;
- case CXL_SLOT_RESET_EVENT:
- afu_dev->error_state = state;
-
- if (err_handler &&
- err_handler->slot_reset)
- err_handler->slot_reset(afu_dev);
- break;
- case CXL_RESUME_EVENT:
- if (err_handler &&
- err_handler->resume)
- err_handler->resume(afu_dev);
- break;
- }
- }
-}
-
-static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
- u64 errstat)
-{
- pr_devel("in %s\n", __func__);
- dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
-
- return cxl_ops->ack_irq(ctx, 0, errstat);
-}
-
-static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
- void *buf, size_t len)
-{
- unsigned int entries, mod;
- unsigned long **vpd_buf = NULL;
- struct sg_list *le;
- int rc = 0, i, tocopy;
- u64 out = 0;
-
- if (buf == NULL)
- return -EINVAL;
-
- /* number of entries in the list */
- entries = len / SG_BUFFER_SIZE;
- mod = len % SG_BUFFER_SIZE;
- if (mod)
- entries++;
-
- if (entries > SG_MAX_ENTRIES) {
- entries = SG_MAX_ENTRIES;
- len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
- mod = 0;
- }
-
- vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
- if (!vpd_buf)
- return -ENOMEM;
-
- le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
- if (!le) {
- rc = -ENOMEM;
- goto err1;
- }
-
- for (i = 0; i < entries; i++) {
- vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
- if (!vpd_buf[i]) {
- rc = -ENOMEM;
- goto err2;
- }
- le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
- le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
- if ((i == (entries - 1)) && mod)
- le[i].len = cpu_to_be64(mod);
- }
-
- if (adapter)
- rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
- virt_to_phys(le), entries, &out);
- else
- rc = cxl_h_collect_vpd(afu->guest->handle, 0,
- virt_to_phys(le), entries, &out);
- pr_devel("length of available (entries: %i), vpd: %#llx\n",
- entries, out);
-
- if (!rc) {
- /*
- * hcall returns in 'out' the size of available VPDs.
- * It fills the buffer with as much data as possible.
- */
- if (out < len)
- len = out;
- rc = len;
- if (out) {
- for (i = 0; i < entries; i++) {
- if (len < SG_BUFFER_SIZE)
- tocopy = len;
- else
- tocopy = SG_BUFFER_SIZE;
- memcpy(buf, vpd_buf[i], tocopy);
- buf += tocopy;
- len -= tocopy;
- }
- }
- }
-err2:
- for (i = 0; i < entries; i++) {
- if (vpd_buf[i])
- free_page((unsigned long) vpd_buf[i]);
- }
- free_page((unsigned long) le);
-err1:
- kfree(vpd_buf);
- return rc;
-}
-
-static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
-{
- return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
-}
-
-static irqreturn_t guest_psl_irq(int irq, void *data)
-{
- struct cxl_context *ctx = data;
- struct cxl_irq_info irq_info;
- int rc;
-
- pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
- rc = guest_get_irq_info(ctx, &irq_info);
- if (rc) {
- WARN(1, "Unable to get IRQ info: %i\n", rc);
- return IRQ_HANDLED;
- }
-
- rc = cxl_irq_psl8(irq, ctx, &irq_info);
- return rc;
-}
-
-static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
-{
- u64 state;
- int rc = 0;
-
- if (!afu)
- return -EIO;
-
- rc = cxl_h_read_error_state(afu->guest->handle, &state);
- if (!rc) {
- WARN_ON(state != H_STATE_NORMAL &&
- state != H_STATE_DISABLE &&
- state != H_STATE_TEMP_UNAVAILABLE &&
- state != H_STATE_PERM_UNAVAILABLE);
- *state_out = state & 0xffffffff;
- }
- return rc;
-}
-
-static irqreturn_t guest_slice_irq_err(int irq, void *data)
-{
- struct cxl_afu *afu = data;
- int rc;
- u64 serr, afu_error, dsisr;
-
- rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
- if (rc) {
- dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
- return IRQ_HANDLED;
- }
- afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
- dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- cxl_afu_decode_psl_serr(afu, serr);
- dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
- dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
-
- rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
- if (rc)
- dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
- rc);
-
- return IRQ_HANDLED;
-}
-
-
-static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
-{
- int i, n;
- struct irq_avail *cur;
-
- for (i = 0; i < adapter->guest->irq_nranges; i++) {
- cur = &adapter->guest->irq_avail[i];
- n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
- 0, len, 0);
- if (n < cur->range) {
- bitmap_set(cur->bitmap, n, len);
- *irq = cur->offset + n;
- pr_devel("guest: allocate IRQs %#x->%#x\n",
- *irq, *irq + len - 1);
-
- return 0;
- }
- }
- return -ENOSPC;
-}
-
-static int irq_free_range(struct cxl *adapter, int irq, int len)
-{
- int i, n;
- struct irq_avail *cur;
-
- if (len == 0)
- return -ENOENT;
-
- for (i = 0; i < adapter->guest->irq_nranges; i++) {
- cur = &adapter->guest->irq_avail[i];
- if (irq >= cur->offset &&
- (irq + len) <= (cur->offset + cur->range)) {
- n = irq - cur->offset;
- bitmap_clear(cur->bitmap, n, len);
- pr_devel("guest: release IRQs %#x->%#x\n",
- irq, irq + len - 1);
- return 0;
- }
- }
- return -ENOENT;
-}
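
Editor's note (not part of the patch): irq_alloc_range()/irq_free_range() above carve contiguous interrupt numbers out of per-range bitmaps. The same bookkeeping in isolation, assuming a single fixed-size range and hypothetical names:

/* Illustrative sketch only -- bitmap-based range allocator, single range. */
#include <linux/bitmap.h>
#include <linux/errno.h>

#define DEMO_NR_IRQS 128

static DECLARE_BITMAP(demo_irq_map, DEMO_NR_IRQS);

static int demo_irq_alloc_range(unsigned int len, unsigned int *first)
{
	unsigned long n;

	/* find len consecutive clear bits, no alignment constraint */
	n = bitmap_find_next_zero_area(demo_irq_map, DEMO_NR_IRQS, 0, len, 0);
	if (n >= DEMO_NR_IRQS)
		return -ENOSPC;

	bitmap_set(demo_irq_map, n, len);
	*first = n;
	return 0;
}

static void demo_irq_free_range(unsigned int first, unsigned int len)
{
	bitmap_clear(demo_irq_map, first, len);
}

The driver iterated this over several ranges handed out by the hypervisor and, in guest_alloc_irq_ranges(), halved the request size on failure to spread an allocation across ranges.
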
-
-static int guest_reset(struct cxl *adapter)
-{
- struct cxl_afu *afu = NULL;
- int i, rc;
-
- pr_devel("Adapter reset request\n");
- spin_lock(&adapter->afu_list_lock);
- for (i = 0; i < adapter->slices; i++) {
- if ((afu = adapter->afu[i])) {
- pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
- pci_channel_io_frozen);
- cxl_context_detach_all(afu);
- }
- }
-
- rc = cxl_h_reset_adapter(adapter->guest->handle);
- for (i = 0; i < adapter->slices; i++) {
- if (!rc && (afu = adapter->afu[i])) {
- pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
- pci_channel_io_normal);
- pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
- }
- }
- spin_unlock(&adapter->afu_list_lock);
- return rc;
-}
-
-static int guest_alloc_one_irq(struct cxl *adapter)
-{
- int irq;
-
- spin_lock(&adapter->guest->irq_alloc_lock);
- if (irq_alloc_range(adapter, 1, &irq))
- irq = -ENOSPC;
- spin_unlock(&adapter->guest->irq_alloc_lock);
- return irq;
-}
-
-static void guest_release_one_irq(struct cxl *adapter, int irq)
-{
- spin_lock(&adapter->guest->irq_alloc_lock);
- irq_free_range(adapter, irq, 1);
- spin_unlock(&adapter->guest->irq_alloc_lock);
-}
-
-static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
- struct cxl *adapter, unsigned int num)
-{
- int i, try, irq;
-
- memset(irqs, 0, sizeof(struct cxl_irq_ranges));
-
- spin_lock(&adapter->guest->irq_alloc_lock);
- for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
- try = num;
- while (try) {
- if (irq_alloc_range(adapter, try, &irq) == 0)
- break;
- try /= 2;
- }
- if (!try)
- goto error;
- irqs->offset[i] = irq;
- irqs->range[i] = try;
- num -= try;
- }
- if (num)
- goto error;
- spin_unlock(&adapter->guest->irq_alloc_lock);
- return 0;
-
-error:
- for (i = 0; i < CXL_IRQ_RANGES; i++)
- irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
- spin_unlock(&adapter->guest->irq_alloc_lock);
- return -ENOSPC;
-}
-
-static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
- struct cxl *adapter)
-{
- int i;
-
- spin_lock(&adapter->guest->irq_alloc_lock);
- for (i = 0; i < CXL_IRQ_RANGES; i++)
- irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
- spin_unlock(&adapter->guest->irq_alloc_lock);
-}
-
-static int guest_register_serr_irq(struct cxl_afu *afu)
-{
- afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
- dev_name(&afu->dev));
- if (!afu->err_irq_name)
- return -ENOMEM;
-
- if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
- guest_slice_irq_err, afu, afu->err_irq_name))) {
- kfree(afu->err_irq_name);
- afu->err_irq_name = NULL;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void guest_release_serr_irq(struct cxl_afu *afu)
-{
- cxl_unmap_irq(afu->serr_virq, afu);
- cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
- kfree(afu->err_irq_name);
-}
-
-static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
-{
- return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
- tfc >> 32, (psl_reset_mask != 0));
-}
-
-static void disable_afu_irqs(struct cxl_context *ctx)
-{
- irq_hw_number_t hwirq;
- unsigned int virq;
- int r, i;
-
- pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- hwirq = ctx->irqs.offset[r];
- for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
- virq = irq_find_mapping(NULL, hwirq);
- disable_irq(virq);
- }
- }
-}
-
-static void enable_afu_irqs(struct cxl_context *ctx)
-{
- irq_hw_number_t hwirq;
- unsigned int virq;
- int r, i;
-
- pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- hwirq = ctx->irqs.offset[r];
- for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
- virq = irq_find_mapping(NULL, hwirq);
- enable_irq(virq);
- }
- }
-}
-
-static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
- u64 offset, u64 *val)
-{
- unsigned long cr;
- char c;
- int rc = 0;
-
- if (afu->crs_len < sz)
- return -ENOENT;
-
- if (unlikely(offset >= afu->crs_len))
- return -ERANGE;
-
- cr = get_zeroed_page(GFP_KERNEL);
- if (!cr)
- return -ENOMEM;
-
- rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
- virt_to_phys((void *)cr), sz);
- if (rc)
- goto err;
-
- switch (sz) {
- case 1:
- c = *((char *) cr);
- *val = c;
- break;
- case 2:
- *val = in_le16((u16 *)cr);
- break;
- case 4:
- *val = in_le32((unsigned *)cr);
- break;
- case 8:
- *val = in_le64((u64 *)cr);
- break;
- default:
- WARN_ON(1);
- }
-err:
- free_page(cr);
- return rc;
-}
-
-static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
- u32 *out)
-{
- int rc;
- u64 val;
-
- rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
- if (!rc)
- *out = (u32) val;
- return rc;
-}
-
-static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
- u16 *out)
-{
- int rc;
- u64 val;
-
- rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
- if (!rc)
- *out = (u16) val;
- return rc;
-}
-
-static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
- u8 *out)
-{
- int rc;
- u64 val;
-
- rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
- if (!rc)
- *out = (u8) val;
- return rc;
-}
-
-static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
- u64 *out)
-{
- return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
-}
-
-static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
-{
- /* config record is not writable from guest */
- return -EPERM;
-}
-
-static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
-{
- /* config record is not writable from guest */
- return -EPERM;
-}
-
-static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
-{
- /* config record is not writable from guest */
- return -EPERM;
-}
-
-static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- struct cxl_process_element_hcall *elem;
- struct cxl *adapter = ctx->afu->adapter;
- const struct cred *cred;
- u32 pid, idx;
- int rc, r, i;
- u64 mmio_addr, mmio_size;
- __be64 flags = 0;
-
- /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
- if (!(elem = (struct cxl_process_element_hcall *)
- get_zeroed_page(GFP_KERNEL)))
- return -ENOMEM;
-
- elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
- if (ctx->kernel) {
- pid = 0;
- flags |= CXL_PE_TRANSLATION_ENABLED;
- flags |= CXL_PE_PRIVILEGED_PROCESS;
- if (mfmsr() & MSR_SF)
- flags |= CXL_PE_64_BIT;
- } else {
- pid = current->pid;
- flags |= CXL_PE_PROBLEM_STATE;
- flags |= CXL_PE_TRANSLATION_ENABLED;
- if (!test_tsk_thread_flag(current, TIF_32BIT))
- flags |= CXL_PE_64_BIT;
- cred = get_current_cred();
- if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
- flags |= CXL_PE_PRIVILEGED_PROCESS;
- put_cred(cred);
- }
- elem->flags = cpu_to_be64(flags);
- elem->common.tid = cpu_to_be32(0); /* Unused */
- elem->common.pid = cpu_to_be32(pid);
- elem->common.csrp = cpu_to_be64(0); /* disable */
- elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */
- elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */
-
- cxl_prefault(ctx, wed);
-
- elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
- elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);
-
- /*
- * Ensure we have at least one interrupt allocated to take faults for
- * kernel contexts that may not have allocated any AFU IRQs at all:
- */
- if (ctx->irqs.range[0] == 0) {
- rc = afu_register_irqs(ctx, 0);
- if (rc)
- goto out_free;
- }
-
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- for (i = 0; i < ctx->irqs.range[r]; i++) {
- if (r == 0 && i == 0) {
- elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
- } else {
- idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
- elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
- }
- }
- }
- elem->common.amr = cpu_to_be64(amr);
- elem->common.wed = cpu_to_be64(wed);
-
- disable_afu_irqs(ctx);
-
- rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
- &ctx->process_token, &mmio_addr, &mmio_size);
- if (rc == H_SUCCESS) {
- if (ctx->master || !ctx->afu->pp_psa) {
- ctx->psn_phys = ctx->afu->psn_phys;
- ctx->psn_size = ctx->afu->adapter->ps_size;
- } else {
- ctx->psn_phys = mmio_addr;
- ctx->psn_size = mmio_size;
- }
- if (ctx->afu->pp_psa && mmio_size &&
- ctx->afu->pp_size == 0) {
- /*
-			 * There is no device tree property to read the
-			 * pp_size from; we only find it out at the first
-			 * attach. Compared to bare-metal this is too late and
-			 * we should really lock here. However, on PowerVM,
-			 * pp_size is really only used for display in /sys.
-			 * A better approach is being discussed with pHyp for
-			 * their next release.
- */
- ctx->afu->pp_size = mmio_size;
- }
- /* from PAPR: process element is bytes 4-7 of process token */
- ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
- pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
- ctx->pe, ctx->external_pe, ctx->psn_size);
- ctx->pe_inserted = true;
- enable_afu_irqs(ctx);
- }
-
-out_free:
- free_page((u64)elem);
- return rc;
-}
-
-static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
-{
- pr_devel("in %s\n", __func__);
-
- ctx->kernel = kernel;
- if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
- return attach_afu_directed(ctx, wed, amr);
-
- /* dedicated mode not supported on FW840 */
-
- return -EINVAL;
-}
-
-static int detach_afu_directed(struct cxl_context *ctx)
-{
- if (!ctx->pe_inserted)
- return 0;
- if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
- return -1;
- return 0;
-}
-
-static int guest_detach_process(struct cxl_context *ctx)
-{
- pr_devel("in %s\n", __func__);
- trace_cxl_detach(ctx);
-
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- return -EIO;
-
- if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
- return detach_afu_directed(ctx);
-
- return -EINVAL;
-}
-
-static void guest_release_afu(struct device *dev)
-{
- struct cxl_afu *afu = to_cxl_afu(dev);
-
- pr_devel("%s\n", __func__);
-
- idr_destroy(&afu->contexts_idr);
-
- kfree(afu->guest);
- kfree(afu);
-}
-
-ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
-{
- return guest_collect_vpd(NULL, afu, buf, len);
-}
-
-#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
-static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
- loff_t off, size_t count)
-{
- void *tbuf = NULL;
- int rc = 0;
-
- tbuf = (void *) get_zeroed_page(GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
-
- rc = cxl_h_get_afu_err(afu->guest->handle,
- off & 0x7,
- virt_to_phys(tbuf),
- count);
- if (rc)
- goto err;
-
- if (count > ERR_BUFF_MAX_COPY_SIZE)
- count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
- memcpy(buf, tbuf, count);
-err:
- free_page((u64)tbuf);
-
- return rc;
-}
-
-static int guest_afu_check_and_enable(struct cxl_afu *afu)
-{
- return 0;
-}
-
-static bool guest_support_attributes(const char *attr_name,
- enum cxl_attrs type)
-{
- switch (type) {
- case CXL_ADAPTER_ATTRS:
- if ((strcmp(attr_name, "base_image") == 0) ||
- (strcmp(attr_name, "load_image_on_perst") == 0) ||
- (strcmp(attr_name, "perst_reloads_same_image") == 0) ||
- (strcmp(attr_name, "image_loaded") == 0))
- return false;
- break;
- case CXL_AFU_MASTER_ATTRS:
- if ((strcmp(attr_name, "pp_mmio_off") == 0))
- return false;
- break;
- case CXL_AFU_ATTRS:
- break;
- default:
- break;
- }
-
- return true;
-}
-
-static int activate_afu_directed(struct cxl_afu *afu)
-{
- int rc;
-
- dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);
-
- afu->current_mode = CXL_MODE_DIRECTED;
-
- afu->num_procs = afu->max_procs_virtualised;
-
- if ((rc = cxl_chardev_m_afu_add(afu)))
- return rc;
-
- if ((rc = cxl_sysfs_afu_m_add(afu)))
- goto err;
-
- if ((rc = cxl_chardev_s_afu_add(afu)))
- goto err1;
-
- return 0;
-err1:
- cxl_sysfs_afu_m_remove(afu);
-err:
- cxl_chardev_afu_remove(afu);
- return rc;
-}
-
-static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
-{
- if (!mode)
- return 0;
- if (!(mode & afu->modes_supported))
- return -EINVAL;
-
- if (mode == CXL_MODE_DIRECTED)
- return activate_afu_directed(afu);
-
- if (mode == CXL_MODE_DEDICATED)
- dev_err(&afu->dev, "Dedicated mode not supported\n");
-
- return -EINVAL;
-}
-
-static int deactivate_afu_directed(struct cxl_afu *afu)
-{
- dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);
-
- afu->current_mode = 0;
- afu->num_procs = 0;
-
- cxl_sysfs_afu_m_remove(afu);
- cxl_chardev_afu_remove(afu);
-
- cxl_ops->afu_reset(afu);
-
- return 0;
-}
-
-static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
-{
- if (!mode)
- return 0;
- if (!(mode & afu->modes_supported))
- return -EINVAL;
-
- if (mode == CXL_MODE_DIRECTED)
- return deactivate_afu_directed(afu);
- return 0;
-}
-
-static int guest_afu_reset(struct cxl_afu *afu)
-{
- pr_devel("AFU(%d) reset request\n", afu->slice);
- return cxl_h_reset_afu(afu->guest->handle);
-}
-
-static int guest_map_slice_regs(struct cxl_afu *afu)
-{
- if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
- dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
- afu->slice);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void guest_unmap_slice_regs(struct cxl_afu *afu)
-{
- if (afu->p2n_mmio)
- iounmap(afu->p2n_mmio);
-}
-
-static int afu_update_state(struct cxl_afu *afu)
-{
- int rc, cur_state;
-
- rc = afu_read_error_state(afu, &cur_state);
- if (rc)
- return rc;
-
- if (afu->guest->previous_state == cur_state)
- return 0;
-
- pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);
-
- switch (cur_state) {
- case H_STATE_NORMAL:
- afu->guest->previous_state = cur_state;
- break;
-
- case H_STATE_DISABLE:
- pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
- pci_channel_io_frozen);
-
- cxl_context_detach_all(afu);
- if ((rc = cxl_ops->afu_reset(afu)))
- pr_devel("reset hcall failed %d\n", rc);
-
- rc = afu_read_error_state(afu, &cur_state);
- if (!rc && cur_state == H_STATE_NORMAL) {
- pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
- pci_channel_io_normal);
- pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
- }
- afu->guest->previous_state = 0;
- break;
-
- case H_STATE_TEMP_UNAVAILABLE:
- afu->guest->previous_state = cur_state;
- break;
-
- case H_STATE_PERM_UNAVAILABLE:
- dev_err(&afu->dev, "AFU is in permanent error state\n");
- pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
- pci_channel_io_perm_failure);
- afu->guest->previous_state = cur_state;
- break;
-
- default:
- pr_err("Unexpected AFU(%d) error state: %#x\n",
- afu->slice, cur_state);
- return -EINVAL;
- }
-
- return rc;
-}
-
-static void afu_handle_errstate(struct work_struct *work)
-{
- struct cxl_afu_guest *afu_guest =
- container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);
-
- if (!afu_update_state(afu_guest->parent) &&
- afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
- return;
-
- if (afu_guest->handle_err)
- schedule_delayed_work(&afu_guest->work_err,
- msecs_to_jiffies(3000));
-}
-
-static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
-{
- int state;
-
- if (afu && (!afu_read_error_state(afu, &state))) {
- if (state == H_STATE_NORMAL)
- return true;
- }
-
- return false;
-}
-
-static int afu_properties_look_ok(struct cxl_afu *afu)
-{
- if (afu->pp_irqs < 0) {
- dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
- return -EINVAL;
- }
-
- if (afu->max_procs_virtualised < 1) {
- dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
-{
- struct cxl_afu *afu;
- bool free = true;
- int rc;
-
- pr_devel("in %s - AFU(%d)\n", __func__, slice);
- if (!(afu = cxl_alloc_afu(adapter, slice)))
- return -ENOMEM;
-
- if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
- kfree(afu);
- return -ENOMEM;
- }
-
- if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
- adapter->adapter_num,
- slice)))
- goto err1;
-
- adapter->slices++;
-
- if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
- goto err1;
-
- if ((rc = cxl_ops->afu_reset(afu)))
- goto err1;
-
- if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
- goto err1;
-
- if ((rc = afu_properties_look_ok(afu)))
- goto err1;
-
- if ((rc = guest_map_slice_regs(afu)))
- goto err1;
-
- if ((rc = guest_register_serr_irq(afu)))
- goto err2;
-
- /*
- * After we call this function we must not free the afu directly, even
- * if it returns an error!
- */
- if ((rc = cxl_register_afu(afu)))
- goto err_put_dev;
-
- if ((rc = cxl_sysfs_afu_add(afu)))
- goto err_del_dev;
-
- /*
- * pHyp doesn't expose the programming models supported by the
- * AFU. pHyp currently only supports directed mode. If it adds
- * dedicated mode later, this version of cxl has no way to
- * detect it. So we'll initialize the driver, but the first
- * attach will fail.
-	 * A better mechanism (likely a new property) is being discussed with pHyp.
- */
- if (afu->max_procs_virtualised == 1)
- afu->modes_supported = CXL_MODE_DEDICATED;
- else
- afu->modes_supported = CXL_MODE_DIRECTED;
-
- if ((rc = cxl_afu_select_best_mode(afu)))
- goto err_remove_sysfs;
-
- adapter->afu[afu->slice] = afu;
-
- afu->enabled = true;
-
- /*
- * wake up the cpu periodically to check the state
- * of the AFU using "afu" stored in the guest structure.
- */
- afu->guest->parent = afu;
- afu->guest->handle_err = true;
- INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
- schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));
-
- if ((rc = cxl_pci_vphb_add(afu)))
- dev_info(&afu->dev, "Can't register vPHB\n");
-
- return 0;
-
-err_remove_sysfs:
- cxl_sysfs_afu_remove(afu);
-err_del_dev:
- device_del(&afu->dev);
-err_put_dev:
- put_device(&afu->dev);
- free = false;
- guest_release_serr_irq(afu);
-err2:
- guest_unmap_slice_regs(afu);
-err1:
- if (free) {
- kfree(afu->guest);
- kfree(afu);
- }
- return rc;
-}
-
-void cxl_guest_remove_afu(struct cxl_afu *afu)
-{
- if (!afu)
- return;
-
- /* flush and stop pending job */
- afu->guest->handle_err = false;
- flush_delayed_work(&afu->guest->work_err);
-
- cxl_pci_vphb_remove(afu);
- cxl_sysfs_afu_remove(afu);
-
- spin_lock(&afu->adapter->afu_list_lock);
- afu->adapter->afu[afu->slice] = NULL;
- spin_unlock(&afu->adapter->afu_list_lock);
-
- cxl_context_detach_all(afu);
- cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
- guest_release_serr_irq(afu);
- guest_unmap_slice_regs(afu);
-
- device_unregister(&afu->dev);
-}
-
-static void free_adapter(struct cxl *adapter)
-{
- struct irq_avail *cur;
- int i;
-
- if (adapter->guest) {
- if (adapter->guest->irq_avail) {
- for (i = 0; i < adapter->guest->irq_nranges; i++) {
- cur = &adapter->guest->irq_avail[i];
- bitmap_free(cur->bitmap);
- }
- kfree(adapter->guest->irq_avail);
- }
- kfree(adapter->guest->status);
- kfree(adapter->guest);
- }
- cxl_remove_adapter_nr(adapter);
- kfree(adapter);
-}
-
-static int properties_look_ok(struct cxl *adapter)
-{
- /* The absence of this property means that the operational
- * status is unknown or okay
- */
- if (strlen(adapter->guest->status) &&
- strcmp(adapter->guest->status, "okay")) {
-		pr_err("ABORTING: Bad operational status of the device\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
-{
- return guest_collect_vpd(adapter, NULL, buf, len);
-}
-
-void cxl_guest_remove_adapter(struct cxl *adapter)
-{
- pr_devel("in %s\n", __func__);
-
- cxl_sysfs_adapter_remove(adapter);
-
- cxl_guest_remove_chardev(adapter);
- device_unregister(&adapter->dev);
-}
-
-static void release_adapter(struct device *dev)
-{
- free_adapter(to_cxl_adapter(dev));
-}
-
-struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
-{
- struct cxl *adapter;
- bool free = true;
- int rc;
-
- if (!(adapter = cxl_alloc_adapter()))
- return ERR_PTR(-ENOMEM);
-
- if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
- free_adapter(adapter);
- return ERR_PTR(-ENOMEM);
- }
-
- adapter->slices = 0;
- adapter->guest->pdev = pdev;
- adapter->dev.parent = &pdev->dev;
- adapter->dev.release = release_adapter;
- dev_set_drvdata(&pdev->dev, adapter);
-
- /*
- * Hypervisor controls PSL timebase initialization (p1 register).
- * On FW840, PSL is initialized.
- */
- adapter->psl_timebase_synced = true;
-
- if ((rc = cxl_of_read_adapter_handle(adapter, np)))
- goto err1;
-
- if ((rc = cxl_of_read_adapter_properties(adapter, np)))
- goto err1;
-
- if ((rc = properties_look_ok(adapter)))
- goto err1;
-
- if ((rc = cxl_guest_add_chardev(adapter)))
- goto err1;
-
- /*
- * After we call this function we must not free the adapter directly,
- * even if it returns an error!
- */
- if ((rc = cxl_register_adapter(adapter)))
- goto err_put_dev;
-
- if ((rc = cxl_sysfs_adapter_add(adapter)))
- goto err_del_dev;
-
- /* release the context lock as the adapter is configured */
- cxl_adapter_context_unlock(adapter);
-
- return adapter;
-
-err_del_dev:
- device_del(&adapter->dev);
-err_put_dev:
- put_device(&adapter->dev);
- free = false;
- cxl_guest_remove_chardev(adapter);
-err1:
- if (free)
- free_adapter(adapter);
- return ERR_PTR(rc);
-}
-
-void cxl_guest_reload_module(struct cxl *adapter)
-{
- struct platform_device *pdev;
-
- pdev = adapter->guest->pdev;
- cxl_guest_remove_adapter(adapter);
-
- cxl_of_probe(pdev);
-}
-
-const struct cxl_backend_ops cxl_guest_ops = {
- .module = THIS_MODULE,
- .adapter_reset = guest_reset,
- .alloc_one_irq = guest_alloc_one_irq,
- .release_one_irq = guest_release_one_irq,
- .alloc_irq_ranges = guest_alloc_irq_ranges,
- .release_irq_ranges = guest_release_irq_ranges,
- .setup_irq = NULL,
- .handle_psl_slice_error = guest_handle_psl_slice_error,
- .psl_interrupt = guest_psl_irq,
- .ack_irq = guest_ack_irq,
- .attach_process = guest_attach_process,
- .detach_process = guest_detach_process,
- .update_ivtes = NULL,
- .support_attributes = guest_support_attributes,
- .link_ok = guest_link_ok,
- .release_afu = guest_release_afu,
- .afu_read_err_buffer = guest_afu_read_err_buffer,
- .afu_check_and_enable = guest_afu_check_and_enable,
- .afu_activate_mode = guest_afu_activate_mode,
- .afu_deactivate_mode = guest_afu_deactivate_mode,
- .afu_reset = guest_afu_reset,
- .afu_cr_read8 = guest_afu_cr_read8,
- .afu_cr_read16 = guest_afu_cr_read16,
- .afu_cr_read32 = guest_afu_cr_read32,
- .afu_cr_read64 = guest_afu_cr_read64,
- .afu_cr_write8 = guest_afu_cr_write8,
- .afu_cr_write16 = guest_afu_cr_write16,
- .afu_cr_write32 = guest_afu_cr_write32,
- .read_adapter_vpd = cxl_guest_read_adapter_vpd,
-};
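
Editor's note (not part of the patch): cxl_guest_ops above is the guest-side instance of the driver's backend-ops pattern: one const table of function pointers per transport (bare-metal PCI vs. PowerVM guest), with hooks that do not apply (setup_irq, update_ivtes) left NULL. A stripped-down sketch of the pattern, with invented names:

/* Illustrative sketch only -- backend-ops vtable with optional hooks. */
struct demo_backend_ops {
	int (*reset)(void *priv);				/* mandatory */
	int (*setup_irq)(void *priv, unsigned int hwirq);	/* optional */
};

static int demo_guest_reset(void *priv)
{
	return 0;	/* e.g. forward to a hypervisor call */
}

/* setup_irq intentionally left NULL, as in cxl_guest_ops above */
static const struct demo_backend_ops demo_guest_ops = {
	.reset = demo_guest_reset,
};

static int demo_call_backend(const struct demo_backend_ops *ops, void *priv)
{
	int rc = ops->reset(priv);		/* always present */

	if (!rc && ops->setup_irq)		/* callers must check optional hooks */
		rc = ops->setup_irq(priv, 0);
	return rc;
}
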
diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c
deleted file mode 100644
index aba5e20eeb1f..000000000000
--- a/drivers/misc/cxl/hcalls.c
+++ /dev/null
@@ -1,643 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2015 IBM Corp.
- */
-
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <asm/byteorder.h>
-#include "hcalls.h"
-#include "trace.h"
-
-#define CXL_HCALL_TIMEOUT 60000
-#define CXL_HCALL_TIMEOUT_DOWNLOAD 120000
-
-#define H_ATTACH_CA_PROCESS 0x344
-#define H_CONTROL_CA_FUNCTION 0x348
-#define H_DETACH_CA_PROCESS 0x34C
-#define H_COLLECT_CA_INT_INFO 0x350
-#define H_CONTROL_CA_FAULTS 0x354
-#define H_DOWNLOAD_CA_FUNCTION 0x35C
-#define H_DOWNLOAD_CA_FACILITY 0x364
-#define H_CONTROL_CA_FACILITY 0x368
-
-#define H_CONTROL_CA_FUNCTION_RESET 1 /* perform a reset */
-#define H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS 2 /* suspend a process from being executed */
-#define H_CONTROL_CA_FUNCTION_RESUME_PROCESS 3 /* resume a process to be executed */
-#define H_CONTROL_CA_FUNCTION_READ_ERR_STATE 4 /* read the error state */
-#define H_CONTROL_CA_FUNCTION_GET_AFU_ERR 5 /* collect the AFU error buffer */
-#define H_CONTROL_CA_FUNCTION_GET_CONFIG 6 /* collect configuration record */
-#define H_CONTROL_CA_FUNCTION_GET_DOWNLOAD_STATE 7 /* query to return download status */
-#define H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS 8 /* terminate the process before completion */
-#define H_CONTROL_CA_FUNCTION_COLLECT_VPD 9 /* collect VPD */
-#define H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT 11 /* read the function-wide error data based on an interrupt */
-#define H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT 12 /* acknowledge function-wide error data based on an interrupt */
-#define H_CONTROL_CA_FUNCTION_GET_ERROR_LOG 13 /* retrieve the Platform Log ID (PLID) of an error log */
-
-#define H_CONTROL_CA_FAULTS_RESPOND_PSL 1
-#define H_CONTROL_CA_FAULTS_RESPOND_AFU 2
-
-#define H_CONTROL_CA_FACILITY_RESET 1 /* perform a reset */
-#define H_CONTROL_CA_FACILITY_COLLECT_VPD 2 /* collect VPD */
-
-#define H_DOWNLOAD_CA_FACILITY_DOWNLOAD 1 /* download adapter image */
-#define H_DOWNLOAD_CA_FACILITY_VALIDATE 2 /* validate adapter image */
-
-
-#define _CXL_LOOP_HCALL(call, rc, retbuf, fn, ...) \
- { \
- unsigned int delay, total_delay = 0; \
- u64 token = 0; \
- \
- memset(retbuf, 0, sizeof(retbuf)); \
- while (1) { \
- rc = call(fn, retbuf, __VA_ARGS__, token); \
- token = retbuf[0]; \
- if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) \
- break; \
- \
- if (rc == H_BUSY) \
- delay = 10; \
- else \
- delay = get_longbusy_msecs(rc); \
- \
- total_delay += delay; \
- if (total_delay > CXL_HCALL_TIMEOUT) { \
- WARN(1, "Warning: Giving up waiting for CXL hcall " \
- "%#x after %u msec\n", fn, total_delay); \
- rc = H_BUSY; \
- break; \
- } \
- msleep(delay); \
- } \
- }
-#define CXL_H_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall, __VA_ARGS__)
-#define CXL_H9_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall9, __VA_ARGS__)
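
Editor's note (not part of the patch): _CXL_LOOP_HCALL above retries while the hypervisor answers H_BUSY or a "long busy" hint, sleeps for the suggested delay (10 ms or get_longbusy_msecs()), and gives up once the accumulated delay exceeds CXL_HCALL_TIMEOUT. The same bounded retry-with-backoff shape, written as a plain function with hypothetical names instead of a macro:

/* Illustrative sketch only -- bounded retry loop with callee-suggested delays. */
#define DEMO_TIMEOUT_MS 60000u

/* Hypothetical callee: 0 = done, <0 = hard error, >0 = retry after that many ms. */
typedef int (*demo_call_fn)(void *arg);

static int demo_retry(demo_call_fn fn, void *arg, void (*sleep_ms)(unsigned int))
{
	unsigned int total_delay = 0;

	for (;;) {
		int rc = fn(arg);

		if (rc <= 0)			/* success or hard failure */
			return rc;

		total_delay += (unsigned int)rc;
		if (total_delay > DEMO_TIMEOUT_MS)
			return -1;		/* timed out, like the WARN above */
		sleep_ms((unsigned int)rc);	/* back off as suggested */
	}
}

The macro form used above additionally threads a continuation token (retbuf[0]) back into each retry, which a plain helper would have to pass through its argument.
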
-
-#define _PRINT_MSG(rc, format, ...) \
- { \
- if ((rc != H_SUCCESS) && (rc != H_CONTINUE)) \
- pr_err(format, __VA_ARGS__); \
- else \
- pr_devel(format, __VA_ARGS__); \
- } \
-
-
-static char *afu_op_names[] = {
- "UNKNOWN_OP", /* 0 undefined */
- "RESET", /* 1 */
- "SUSPEND_PROCESS", /* 2 */
- "RESUME_PROCESS", /* 3 */
- "READ_ERR_STATE", /* 4 */
- "GET_AFU_ERR", /* 5 */
- "GET_CONFIG", /* 6 */
- "GET_DOWNLOAD_STATE", /* 7 */
- "TERMINATE_PROCESS", /* 8 */
- "COLLECT_VPD", /* 9 */
- "UNKNOWN_OP", /* 10 undefined */
- "GET_FUNCTION_ERR_INT", /* 11 */
- "ACK_FUNCTION_ERR_INT", /* 12 */
- "GET_ERROR_LOG", /* 13 */
-};
-
-static char *control_adapter_op_names[] = {
- "UNKNOWN_OP", /* 0 undefined */
- "RESET", /* 1 */
- "COLLECT_VPD", /* 2 */
-};
-
-static char *download_op_names[] = {
- "UNKNOWN_OP", /* 0 undefined */
- "DOWNLOAD", /* 1 */
- "VALIDATE", /* 2 */
-};
-
-static char *op_str(unsigned int op, char *name_array[], int array_len)
-{
- if (op >= array_len)
- return "UNKNOWN_OP";
- return name_array[op];
-}
-
-#define OP_STR(op, name_array) op_str(op, name_array, ARRAY_SIZE(name_array))
-
-#define OP_STR_AFU(op) OP_STR(op, afu_op_names)
-#define OP_STR_CONTROL_ADAPTER(op) OP_STR(op, control_adapter_op_names)
-#define OP_STR_DOWNLOAD_ADAPTER(op) OP_STR(op, download_op_names)
-
-
-long cxl_h_attach_process(u64 unit_address,
- struct cxl_process_element_hcall *element,
- u64 *process_token, u64 *mmio_addr, u64 *mmio_size)
-{
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- long rc;
-
- CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_ATTACH_CA_PROCESS, unit_address, virt_to_phys(element));
- _PRINT_MSG(rc, "cxl_h_attach_process(%#.16llx, %#.16lx): %li\n",
- unit_address, virt_to_phys(element), rc);
- trace_cxl_hcall_attach(unit_address, virt_to_phys(element), retbuf[0], retbuf[1], retbuf[2], rc);
-
- pr_devel("token: 0x%.8lx mmio_addr: 0x%lx mmio_size: 0x%lx\nProcess Element Structure:\n",
- retbuf[0], retbuf[1], retbuf[2]);
- cxl_dump_debug_buffer(element, sizeof(*element));
-
- switch (rc) {
- case H_SUCCESS: /* The process info is attached to the coherent platform function */
- *process_token = retbuf[0];
- if (mmio_addr)
- *mmio_addr = retbuf[1];
- if (mmio_size)
- *mmio_size = retbuf[2];
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- case H_FUNCTION: /* The function is not supported. */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
- case H_RESOURCE: /* The coherent platform function does not have enough additional resource to attach the process */
- case H_HARDWARE: /* A hardware event prevented the attach operation */
- case H_STATE: /* The coherent platform function is not in a valid state */
- case H_BUSY:
- return -EBUSY;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_detach_process - Detach a process element from a coherent
- * platform function.
- */
-long cxl_h_detach_process(u64 unit_address, u64 process_token)
-{
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- long rc;
-
- CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS, unit_address, process_token);
- _PRINT_MSG(rc, "cxl_h_detach_process(%#.16llx, 0x%.8llx): %li\n", unit_address, process_token, rc);
- trace_cxl_hcall_detach(unit_address, process_token, rc);
-
- switch (rc) {
- case H_SUCCESS: /* The process was detached from the coherent platform function */
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
- case H_RESOURCE: /* The function has page table mappings for MMIO */
- case H_HARDWARE: /* A hardware event prevented the detach operation */
- case H_STATE: /* The coherent platform function is not in a valid state */
- case H_BUSY:
- return -EBUSY;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows
- * the partition to manipulate or query
- * certain coherent platform function behaviors.
- */
-static long cxl_h_control_function(u64 unit_address, u64 op,
- u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
-{
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
- long rc;
-
- CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FUNCTION, unit_address, op, p1, p2, p3, p4);
- _PRINT_MSG(rc, "cxl_h_control_function(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
- unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
- trace_cxl_hcall_control_function(unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
-
- switch (rc) {
- case H_SUCCESS: /* The operation is completed for the coherent platform function */
- if ((op == H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT ||
- op == H_CONTROL_CA_FUNCTION_READ_ERR_STATE ||
- op == H_CONTROL_CA_FUNCTION_COLLECT_VPD))
- *out = retbuf[0];
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- case H_FUNCTION: /* The function is not supported. */
- case H_NOT_FOUND: /* The operation supplied was not valid */
- case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
-	case H_SG_LIST: /* A block list entry was invalid */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
- case H_RESOURCE: /* The function has page table mappings for MMIO */
- case H_HARDWARE: /* A hardware event prevented the attach operation */
- case H_STATE: /* The coherent platform function is not in a valid state */
- case H_BUSY:
- return -EBUSY;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_reset_afu - Perform a reset to the coherent platform function.
- */
-long cxl_h_reset_afu(u64 unit_address)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_RESET,
- 0, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_suspend_process - Suspend a process from being executed
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_suspend_process(u64 unit_address, u64 process_token)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS,
- process_token, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_resume_process - Resume a process to be executed
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_resume_process(u64 unit_address, u64 process_token)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_RESUME_PROCESS,
- process_token, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_read_error_state - Checks the error state of the coherent
- * platform function.
- * R4 contains the error state
- */
-long cxl_h_read_error_state(u64 unit_address, u64 *state)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_READ_ERR_STATE,
- 0, 0, 0, 0,
- state);
-}
-
-/*
- * cxl_h_get_afu_err - collect the AFU error buffer
- * Parameter1 = byte offset into error buffer to retrieve, valid values
- * are between 0 and (ibm,error-buffer-size - 1)
- * Parameter2 = 4K aligned real address of error buffer, to be filled in
- * Parameter3 = length of error buffer, valid values are 4K or less
- */
-long cxl_h_get_afu_err(u64 unit_address, u64 offset,
- u64 buf_address, u64 len)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_GET_AFU_ERR,
- offset, buf_address, len, 0,
- NULL);
-}
-
-/*
- * cxl_h_get_config - collect configuration record for the
- * coherent platform function
- * Parameter1 = # of configuration record to retrieve, valid values are
- * between 0 and (ibm,#config-records - 1)
- * Parameter2 = byte offset into configuration record to retrieve,
- * valid values are between 0 and (ibm,config-record-size - 1)
- * Parameter3 = 4K aligned real address of configuration record buffer,
- * to be filled in
- * Parameter4 = length of configuration buffer, valid values are 4K or less
- */
-long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
- u64 buf_address, u64 len)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_GET_CONFIG,
- cr_num, offset, buf_address, len,
- NULL);
-}
-
-/*
- * cxl_h_terminate_process - Terminate the process before completion
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_terminate_process(u64 unit_address, u64 process_token)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS,
- process_token, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_collect_vpd - Collect VPD for the coherent platform function.
- * Parameter1 = # of VPD record to retrieve, valid values are between 0
- * and (ibm,#config-records - 1).
- * Parameter2 = 4K naturally aligned real buffer containing block
- * list entries
- * Parameter3 = number of block list entries in the block list, valid
- * values are between 0 and 256
- */
-long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
- u64 num, u64 *out)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_COLLECT_VPD,
- record, list_address, num, 0,
- out);
-}
-
-/*
- * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
- */
-long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT,
- 0, 0, 0, 0, reg);
-}
-
-/*
- * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
- * based on an interrupt
- * Parameter1 = value to write to the function-wide error interrupt register
- */
-long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT,
- value, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
- * an error log
- */
-long cxl_h_get_error_log(u64 unit_address, u64 value)
-{
- return cxl_h_control_function(unit_address,
- H_CONTROL_CA_FUNCTION_GET_ERROR_LOG,
- 0, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_collect_int_info - Collect interrupt info about a coherent
- * platform function after an interrupt occurred.
- */
-long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
- struct cxl_irq_info *info)
-{
- long rc;
-
- BUG_ON(sizeof(*info) != sizeof(unsigned long[PLPAR_HCALL9_BUFSIZE]));
-
- rc = plpar_hcall9(H_COLLECT_CA_INT_INFO, (unsigned long *) info,
- unit_address, process_token);
- _PRINT_MSG(rc, "cxl_h_collect_int_info(%#.16llx, 0x%llx): %li\n",
- unit_address, process_token, rc);
- trace_cxl_hcall_collect_int_info(unit_address, process_token, rc);
-
- switch (rc) {
- case H_SUCCESS: /* The interrupt info is returned in return registers. */
- pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid_tid:%#llx, afu_err:%#llx, errstat:%#llx\n",
- info->dsisr, info->dar, info->dsr, info->reserved,
- info->afu_err, info->errstat);
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall. */
- case H_HARDWARE: /* A hardware event prevented the collection of the interrupt info.*/
- case H_STATE: /* The coherent platform function is not in a valid state to collect interrupt info. */
- return -EBUSY;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_control_faults - Control the operation of a coherent platform
- * function after a fault occurs.
- *
- * Parameters
- * control-mask: value to control the faults
- * looks like PSL_TFC_An shifted >> 32
- * reset-mask: mask to control reset of function faults
- * Set reset_mask = 1 to reset PSL errors
- */
-long cxl_h_control_faults(u64 unit_address, u64 process_token,
- u64 control_mask, u64 reset_mask)
-{
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- long rc;
-
- memset(retbuf, 0, sizeof(retbuf));
-
- rc = plpar_hcall(H_CONTROL_CA_FAULTS, retbuf, unit_address,
- H_CONTROL_CA_FAULTS_RESPOND_PSL, process_token,
- control_mask, reset_mask);
- _PRINT_MSG(rc, "cxl_h_control_faults(%#.16llx, 0x%llx, %#llx, %#llx): %li (%#lx)\n",
- unit_address, process_token, control_mask, reset_mask,
- rc, retbuf[0]);
- trace_cxl_hcall_control_faults(unit_address, process_token,
- control_mask, reset_mask, retbuf[0], rc);
-
- switch (rc) {
- case H_SUCCESS: /* Faults were successfully controlled for the function. */
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- return -EINVAL;
- case H_HARDWARE: /* A hardware event prevented the control of faults. */
- case H_STATE: /* The function was in an invalid state. */
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall; the coherent platform facilities may need to be licensed. */
- return -EBUSY;
- case H_FUNCTION: /* The function is not supported */
- case H_NOT_FOUND: /* The operation supplied was not valid */
- return -EINVAL;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call
- * allows the partition to manipulate or query
- * certain coherent platform facility behaviors.
- */
-static long cxl_h_control_facility(u64 unit_address, u64 op,
- u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
-{
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
- long rc;
-
- CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FACILITY, unit_address, op, p1, p2, p3, p4);
- _PRINT_MSG(rc, "cxl_h_control_facility(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
- unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
- trace_cxl_hcall_control_facility(unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
-
- switch (rc) {
- case H_SUCCESS: /* The operation is completed for the coherent platform facility */
- if (op == H_CONTROL_CA_FACILITY_COLLECT_VPD)
- *out = retbuf[0];
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied. */
- case H_FUNCTION: /* The function is not supported. */
- case H_NOT_FOUND: /* The operation supplied was not valid */
- case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
- case H_SG_LIST: /* A block list entry was invalid */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
- case H_RESOURCE: /* The function has page table mappings for MMIO */
- case H_HARDWARE: /* A hardware event prevented the attach operation */
- case H_STATE: /* The coherent platform facility is not in a valid state */
- case H_BUSY:
- return -EBUSY;
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
- */
-long cxl_h_reset_adapter(u64 unit_address)
-{
- return cxl_h_control_facility(unit_address,
- H_CONTROL_CA_FACILITY_RESET,
- 0, 0, 0, 0,
- NULL);
-}
-
-/*
- * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
- * Parameter1 = 4K naturally aligned real buffer containing block
- * list entries
- * Parameter2 = number of block list entries in the block list, valid
- * values are between 0 and 256
- */
-long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
- u64 num, u64 *out)
-{
- return cxl_h_control_facility(unit_address,
- H_CONTROL_CA_FACILITY_COLLECT_VPD,
- list_address, num, 0, 0,
- out);
-}
-
-/*
- * cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY
- * hypervisor call provides platform support for
- * downloading a base adapter image to the coherent
- * platform facility, and for validating the entire
- * image after the download.
- * Parameters
- * op: operation to perform to the coherent platform function
- * Download: operation = 1, the base image in the coherent platform
- * facility is first erased, and then
- * programmed using the image supplied
- * in the scatter/gather list.
- * Validate: operation = 2, the base image in the coherent platform
- * facility is compared with the image
- * supplied in the scatter/gather list.
- * list_address: 4K naturally aligned real buffer containing
- * scatter/gather list entries.
- * num: number of block list entries in the scatter/gather list.
- */
-static long cxl_h_download_facility(u64 unit_address, u64 op,
- u64 list_address, u64 num,
- u64 *out)
-{
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- unsigned int delay, total_delay = 0;
- u64 token = 0;
- long rc;
-
- if (*out != 0)
- token = *out;
-
- memset(retbuf, 0, sizeof(retbuf));
- while (1) {
- rc = plpar_hcall(H_DOWNLOAD_CA_FACILITY, retbuf,
- unit_address, op, list_address, num,
- token);
- token = retbuf[0];
- if (rc != H_BUSY && !H_IS_LONG_BUSY(rc))
- break;
-
- if (rc != H_BUSY) {
- delay = get_longbusy_msecs(rc);
- total_delay += delay;
- if (total_delay > CXL_HCALL_TIMEOUT_DOWNLOAD) {
- WARN(1, "Warning: Giving up waiting for CXL hcall "
- "%#x after %u msec\n",
- H_DOWNLOAD_CA_FACILITY, total_delay);
- rc = H_BUSY;
- break;
- }
- msleep(delay);
- }
- }
- _PRINT_MSG(rc, "cxl_h_download_facility(%#.16llx, %s(%#llx, %#llx), %#lx): %li\n",
- unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
- trace_cxl_hcall_download_facility(unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
-
- switch (rc) {
- case H_SUCCESS: /* The operation is completed for the coherent platform facility */
- return 0;
- case H_PARAMETER: /* An incorrect parameter was supplied */
- case H_FUNCTION: /* The function is not supported. */
- case H_SG_LIST: /* A block list entry was invalid */
- case H_BAD_DATA: /* Image verification failed */
- return -EINVAL;
- case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
- case H_RESOURCE: /* The function has page table mappings for MMIO */
- case H_HARDWARE: /* A hardware event prevented the attach operation */
- case H_STATE: /* The coherent platform facility is not in a valid state */
- case H_BUSY:
- return -EBUSY;
- case H_CONTINUE:
- *out = retbuf[0];
- return 1; /* More data is needed for the complete image */
- default:
- WARN(1, "Unexpected return code: %lx", rc);
- return -EINVAL;
- }
-}
-
-/*
- * cxl_h_download_adapter_image - Download the base image to the coherent
- * platform facility.
- */
-long cxl_h_download_adapter_image(u64 unit_address,
- u64 list_address, u64 num,
- u64 *out)
-{
- return cxl_h_download_facility(unit_address,
- H_DOWNLOAD_CA_FACILITY_DOWNLOAD,
- list_address, num, out);
-}
-
-/*
- * cxl_h_validate_adapter_image - Validate the base image in the coherent
- * platform facility.
- */
-long cxl_h_validate_adapter_image(u64 unit_address,
- u64 list_address, u64 num,
- u64 *out)
-{
- return cxl_h_download_facility(unit_address,
- H_DOWNLOAD_CA_FACILITY_VALIDATE,
- list_address, num, out);
-}
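For context on the wrappers deleted above: cxl_h_download_facility() returns 1 when the hypervisor answers H_CONTINUE and hands a continuation token back through *out, so a caller is expected to keep re-issuing the hcall with that token until no more data is requested. A minimal, hypothetical usage sketch (not taken from the deleted driver; unit_address, list_address and the build_next_sg_list() helper are assumed for illustration):

	u64 token = 0;	/* must start at 0; updated by the hcall on H_CONTINUE */
	u64 num;	/* number of struct sg_list entries in the 4K buffer */
	long rc;

	do {
		/* assumed helper: fill the 4K-aligned buffer at list_address
		 * with scatter/gather entries and return how many were written */
		num = build_next_sg_list(list_address);
		rc = cxl_h_download_adapter_image(unit_address, list_address,
						  num, &token);
	} while (rc == 1);	/* 1 == H_CONTINUE: firmware wants more data */

	if (rc == 0)
		pr_info("cxl: adapter image downloaded\n");

Validation would follow the same pattern through cxl_h_validate_adapter_image(), again starting with the token set to 0.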
diff --git a/drivers/misc/cxl/hcalls.h b/drivers/misc/cxl/hcalls.h
deleted file mode 100644
index d200465dc6ac..000000000000
--- a/drivers/misc/cxl/hcalls.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#ifndef _HCALLS_H
-#define _HCALLS_H
-
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/hvcall.h>
-#include "cxl.h"
-
-#define SG_BUFFER_SIZE 4096
-#define SG_MAX_ENTRIES 256
-
-struct sg_list {
- u64 phys_addr;
- u64 len;
-};
-
-/*
- * This is straight out of PAPR, but replacing some of the compound fields with
- * a single field, where they were identical to the register layout.
- *
- * The 'flags' parameter regroups the various bit-fields
- */
-#define CXL_PE_CSRP_VALID (1ULL << 63)
-#define CXL_PE_PROBLEM_STATE (1ULL << 62)
-#define CXL_PE_SECONDARY_SEGMENT_TBL_SRCH (1ULL << 61)
-#define CXL_PE_TAGS_ACTIVE (1ULL << 60)
-#define CXL_PE_USER_STATE (1ULL << 59)
-#define CXL_PE_TRANSLATION_ENABLED (1ULL << 58)
-#define CXL_PE_64_BIT (1ULL << 57)
-#define CXL_PE_PRIVILEGED_PROCESS (1ULL << 56)
-
-#define CXL_PROCESS_ELEMENT_VERSION 1
-struct cxl_process_element_hcall {
- __be64 version;
- __be64 flags;
- u8 reserved0[12];
- __be32 pslVirtualIsn;
- u8 applicationVirtualIsnBitmap[256];
- u8 reserved1[144];
- struct cxl_process_element_common common;
- u8 reserved4[12];
-} __packed;
-
-#define H_STATE_NORMAL 1
-#define H_STATE_DISABLE 2
-#define H_STATE_TEMP_UNAVAILABLE 3
-#define H_STATE_PERM_UNAVAILABLE 4
-
-/* NOTE: element must be a logical real address, and must be pinned */
-long cxl_h_attach_process(u64 unit_address, struct cxl_process_element_hcall *element,
- u64 *process_token, u64 *mmio_addr, u64 *mmio_size);
-
-/**
- * cxl_h_detach_process - Detach a process element from a coherent
- * platform function.
- */
-long cxl_h_detach_process(u64 unit_address, u64 process_token);
-
-/**
- * cxl_h_reset_afu - Perform a reset to the coherent platform function.
- */
-long cxl_h_reset_afu(u64 unit_address);
-
-/**
- * cxl_h_suspend_process - Suspend a process from being executed
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_suspend_process(u64 unit_address, u64 process_token);
-
-/**
- * cxl_h_resume_process - Resume a process to be executed
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_resume_process(u64 unit_address, u64 process_token);
-
-/**
- * cxl_h_read_error_state - Reads the error state of the coherent
- * platform function.
- * R4 contains the error state
- */
-long cxl_h_read_error_state(u64 unit_address, u64 *state);
-
-/**
- * cxl_h_get_afu_err - collect the AFU error buffer
- * Parameter1 = byte offset into error buffer to retrieve, valid values
- * are between 0 and (ibm,error-buffer-size - 1)
- * Parameter2 = 4K aligned real address of error buffer, to be filled in
- * Parameter3 = length of error buffer, valid values are 4K or less
- */
-long cxl_h_get_afu_err(u64 unit_address, u64 offset, u64 buf_address, u64 len);
-
-/**
- * cxl_h_get_config - collect configuration record for the
- * coherent platform function
- * Parameter1 = # of configuration record to retrieve, valid values are
- * between 0 and (ibm,#config-records - 1)
- * Parameter2 = byte offset into configuration record to retrieve,
- * valid values are between 0 and (ibm,config-record-size - 1)
- * Parameter3 = 4K aligned real address of configuration record buffer,
- * to be filled in
- * Parameter4 = length of configuration buffer, valid values are 4K or less
- */
-long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
- u64 buf_address, u64 len);
-
-/**
- * cxl_h_terminate_process - Terminate the process before completion
- * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
- * process was attached.
- */
-long cxl_h_terminate_process(u64 unit_address, u64 process_token);
-
-/**
- * cxl_h_collect_vpd - Collect VPD for the coherent platform function.
- * Parameter1 = # of VPD record to retrieve, valid values are between 0
- * and (ibm,#config-records - 1).
- * Parameter2 = 4K naturally aligned real buffer containing block
- * list entries
- * Parameter3 = number of block list entries in the block list, valid
- * values are between 0 and 256
- */
-long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
- u64 num, u64 *out);
-
-/**
- * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
- */
-long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg);
-
-/**
- * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
- * based on an interrupt
- * Parameter1 = value to write to the function-wide error interrupt register
- */
-long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value);
-
-/**
- * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
- * an error log
- */
-long cxl_h_get_error_log(u64 unit_address, u64 value);
-
-/**
- * cxl_h_collect_int_info - Collect interrupt info about a coherent
- * platform function after an interrupt occurred.
- */
-long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
- struct cxl_irq_info *info);
-
-/**
- * cxl_h_control_faults - Control the operation of a coherent platform
- * function after a fault occurs.
- *
- * Parameters
- * control-mask: value to control the faults
- * looks like PSL_TFC_An shifted >> 32
- * reset-mask: mask to control reset of function faults
- * Set reset_mask = 1 to reset PSL errors
- */
-long cxl_h_control_faults(u64 unit_address, u64 process_token,
- u64 control_mask, u64 reset_mask);
-
-/**
- * cxl_h_reset_adapter - Perform a reset to the coherent platform facility.
- */
-long cxl_h_reset_adapter(u64 unit_address);
-
-/**
- * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
- * Parameter1 = 4K naturally aligned real buffer containing block
- * list entries
- * Parameter2 = number of block list entries in the block list, valid
- * values are between 0 and 256
- */
-long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
- u64 num, u64 *out);
-
-/**
- * cxl_h_download_adapter_image - Download the base image to the coherent
- * platform facility.
- */
-long cxl_h_download_adapter_image(u64 unit_address,
- u64 list_address, u64 num,
- u64 *out);
-
-/**
- * cxl_h_validate_adapter_image - Validate the base image in the coherent
- * platform facility.
- */
-long cxl_h_validate_adapter_image(u64 unit_address,
- u64 list_address, u64 num,
- u64 *out);
-#endif /* _HCALLS_H */
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
deleted file mode 100644
index b730e022a48e..000000000000
--- a/drivers/misc/cxl/irq.c
+++ /dev/null
@@ -1,450 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-#include <linux/pid.h>
-#include <asm/cputable.h>
-#include <misc/cxl-base.h>
-
-#include "cxl.h"
-#include "trace.h"
-
-static int afu_irq_range_start(void)
-{
- if (cpu_has_feature(CPU_FTR_HVMODE))
- return 1;
- return 0;
-}
-
-static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
-{
- ctx->dsisr = dsisr;
- ctx->dar = dar;
- schedule_work(&ctx->fault_work);
- return IRQ_HANDLED;
-}
-
-irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
-{
- u64 dsisr, dar;
-
- dsisr = irq_info->dsisr;
- dar = irq_info->dar;
-
- trace_cxl_psl9_irq(ctx, irq, dsisr, dar);
-
- pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
-
- if (dsisr & CXL_PSL9_DSISR_An_TF) {
- pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe);
- return schedule_cxl_fault(ctx, dsisr, dar);
- }
-
- if (dsisr & CXL_PSL9_DSISR_An_PE)
- return cxl_ops->handle_psl_slice_error(ctx, dsisr,
- irq_info->errstat);
- if (dsisr & CXL_PSL9_DSISR_An_AE) {
- pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
-
- if (ctx->pending_afu_err) {
- /*
- * This shouldn't happen - the PSL treats these errors
- * as fatal and will have reset the AFU, so there's not
- * much point buffering multiple AFU errors.
- * OTOH if we DO ever see a storm of these come in it's
- * probably best that we log them somewhere:
- */
- dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
- ctx->pe, irq_info->afu_err);
- } else {
- spin_lock(&ctx->lock);
- ctx->afu_err = irq_info->afu_err;
- ctx->pending_afu_err = 1;
- spin_unlock(&ctx->lock);
-
- wake_up_all(&ctx->wq);
- }
-
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
- return IRQ_HANDLED;
- }
- if (dsisr & CXL_PSL9_DSISR_An_OC)
- pr_devel("CXL interrupt: OS Context Warning\n");
-
- WARN(1, "Unhandled CXL PSL IRQ\n");
- return IRQ_HANDLED;
-}
-
-irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
-{
- u64 dsisr, dar;
-
- dsisr = irq_info->dsisr;
- dar = irq_info->dar;
-
- trace_cxl_psl_irq(ctx, irq, dsisr, dar);
-
- pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
-
- if (dsisr & CXL_PSL_DSISR_An_DS) {
- /*
- * We don't inherently need to sleep to handle this, but we do
- * need to get a ref to the task's mm, which we can't do from
- * irq context without the potential for a deadlock since it
- * takes the task_lock. An alternate option would be to keep a
- * reference to the task's mm the entire time it has cxl open,
- * but to do that we need to solve the issue where we hold a
- * ref to the mm, but the mm can hold a ref to the fd after an
- * mmap preventing anything from being cleaned up.
- */
- pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
- return schedule_cxl_fault(ctx, dsisr, dar);
- }
-
- if (dsisr & CXL_PSL_DSISR_An_M)
- pr_devel("CXL interrupt: PTE not found\n");
- if (dsisr & CXL_PSL_DSISR_An_P)
- pr_devel("CXL interrupt: Storage protection violation\n");
- if (dsisr & CXL_PSL_DSISR_An_A)
- pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
- if (dsisr & CXL_PSL_DSISR_An_S)
- pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
- if (dsisr & CXL_PSL_DSISR_An_K)
- pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");
-
- if (dsisr & CXL_PSL_DSISR_An_DM) {
- /*
- * In some cases we might be able to handle the fault
- * immediately if hash_page would succeed, but we still need
- * the task's mm, which as above we can't get without a lock
- */
- pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
- return schedule_cxl_fault(ctx, dsisr, dar);
- }
- if (dsisr & CXL_PSL_DSISR_An_ST)
- WARN(1, "CXL interrupt: Segment Table PTE not found\n");
- if (dsisr & CXL_PSL_DSISR_An_UR)
- pr_devel("CXL interrupt: AURP PTE not found\n");
- if (dsisr & CXL_PSL_DSISR_An_PE)
- return cxl_ops->handle_psl_slice_error(ctx, dsisr,
- irq_info->errstat);
- if (dsisr & CXL_PSL_DSISR_An_AE) {
- pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
-
- if (ctx->pending_afu_err) {
- /*
- * This shouldn't happen - the PSL treats these errors
- * as fatal and will have reset the AFU, so there's not
- * much point buffering multiple AFU errors.
- * OTOH if we DO ever see a storm of these come in it's
- * probably best that we log them somewhere:
- */
- dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
- "undelivered to pe %i: 0x%016llx\n",
- ctx->pe, irq_info->afu_err);
- } else {
- spin_lock(&ctx->lock);
- ctx->afu_err = irq_info->afu_err;
- ctx->pending_afu_err = true;
- spin_unlock(&ctx->lock);
-
- wake_up_all(&ctx->wq);
- }
-
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
- return IRQ_HANDLED;
- }
- if (dsisr & CXL_PSL_DSISR_An_OC)
- pr_devel("CXL interrupt: OS Context Warning\n");
-
- WARN(1, "Unhandled CXL PSL IRQ\n");
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cxl_irq_afu(int irq, void *data)
-{
- struct cxl_context *ctx = data;
- irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
- int irq_off, afu_irq = 0;
- __u16 range;
- int r;
-
- /*
- * Look for the interrupt number.
- * On bare-metal, we know range 0 only contains the PSL
- * interrupt so we could start counting at range 1 and initialize
- * afu_irq at 1.
- * In a guest, range 0 also contains AFU interrupts, so it must
- * be accounted for. Therefore we initialize afu_irq at 0 to take into
- * account the PSL interrupt.
- *
- * For code-readability, it just seems easier to go over all
- * the ranges on bare-metal and guest. The end result is the same.
- */
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- irq_off = hwirq - ctx->irqs.offset[r];
- range = ctx->irqs.range[r];
- if (irq_off >= 0 && irq_off < range) {
- afu_irq += irq_off;
- break;
- }
- afu_irq += range;
- }
- if (unlikely(r >= CXL_IRQ_RANGES)) {
- WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
- ctx->pe, irq, hwirq);
- return IRQ_HANDLED;
- }
-
- trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq);
- pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
- afu_irq, ctx->pe, irq, hwirq);
-
- if (unlikely(!ctx->irq_bitmap)) {
- WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
- return IRQ_HANDLED;
- }
- spin_lock(&ctx->lock);
- set_bit(afu_irq - 1, ctx->irq_bitmap);
- ctx->pending_irq = true;
- spin_unlock(&ctx->lock);
-
- wake_up_all(&ctx->wq);
-
- return IRQ_HANDLED;
-}
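To make the range walk in cxl_irq_afu() above concrete, a worked example with purely hypothetical numbers: with ctx->irqs.offset = {0x100, 0x200} and ctx->irqs.range = {2, 3}, an incoming hwirq of 0x201 misses range 0 (afu_irq advances by 2), then lands at offset 1 within range 1, giving afu_irq = 3; the handler records it by setting bit 2 (afu_irq - 1) in ctx->irq_bitmap and waking the context's wait queue.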
-
-unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
- irq_handler_t handler, void *cookie, const char *name)
-{
- unsigned int virq;
- int result;
-
- /* IRQ Domain? */
- virq = irq_create_mapping(NULL, hwirq);
- if (!virq) {
- dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
- return 0;
- }
-
- if (cxl_ops->setup_irq)
- cxl_ops->setup_irq(adapter, hwirq, virq);
-
- pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
-
- result = request_irq(virq, handler, 0, name, cookie);
- if (result) {
- dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
- return 0;
- }
-
- return virq;
-}
-
-void cxl_unmap_irq(unsigned int virq, void *cookie)
-{
- free_irq(virq, cookie);
-}
-
-int cxl_register_one_irq(struct cxl *adapter,
- irq_handler_t handler,
- void *cookie,
- irq_hw_number_t *dest_hwirq,
- unsigned int *dest_virq,
- const char *name)
-{
- int hwirq, virq;
-
- if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
- return hwirq;
-
- if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
- goto err;
-
- *dest_hwirq = hwirq;
- *dest_virq = virq;
-
- return 0;
-
-err:
- cxl_ops->release_one_irq(adapter, hwirq);
- return -ENOMEM;
-}
-
-void afu_irq_name_free(struct cxl_context *ctx)
-{
- struct cxl_irq_name *irq_name, *tmp;
-
- list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
- kfree(irq_name->name);
- list_del(&irq_name->list);
- kfree(irq_name);
- }
-}
-
-int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
-{
- int rc, r, i, j = 1;
- struct cxl_irq_name *irq_name;
- int alloc_count;
-
- /*
- * In native mode, range 0 is reserved for the multiplexed
- * PSL interrupt. It has been allocated when the AFU was initialized.
- *
- * In a guest, the PSL interrupt is not multiplexed, but per-context,
- * and is the first interrupt from range 0. It still needs to be
- * allocated, so bump the count by one.
- */
- if (cpu_has_feature(CPU_FTR_HVMODE))
- alloc_count = count;
- else
- alloc_count = count + 1;
-
- if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
- alloc_count)))
- return rc;
-
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- /* Multiplexed PSL Interrupt */
- ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
- ctx->irqs.range[0] = 1;
- }
-
- ctx->irq_count = count;
- ctx->irq_bitmap = bitmap_zalloc(count, GFP_KERNEL);
- if (!ctx->irq_bitmap)
- goto out;
-
- /*
- * Allocate names first. If any fail, bail out before allocating
- * actual hardware IRQs.
- */
- for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
- for (i = 0; i < ctx->irqs.range[r]; i++) {
- irq_name = kmalloc(sizeof(struct cxl_irq_name),
- GFP_KERNEL);
- if (!irq_name)
- goto out;
- irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
- dev_name(&ctx->afu->dev),
- ctx->pe, j);
- if (!irq_name->name) {
- kfree(irq_name);
- goto out;
- }
- /* Add to tail so the next lookup gets the correct order */
- list_add_tail(&irq_name->list, &ctx->irq_names);
- j++;
- }
- }
- return 0;
-
-out:
- cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
- bitmap_free(ctx->irq_bitmap);
- afu_irq_name_free(ctx);
- return -ENOMEM;
-}
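As a concrete illustration of the allocation comment above (hypothetical count): a guest context asking for count = 4 AFU interrupts ends up with alloc_count = 5, since its per-context PSL interrupt also has to come out of range 0; on bare-metal the same request allocates exactly 4, and range 0 is instead pointed at the adapter's pre-allocated multiplexed PSL hwirq with a range of 1.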
-
-static void afu_register_hwirqs(struct cxl_context *ctx)
-{
- irq_hw_number_t hwirq;
- struct cxl_irq_name *irq_name;
- int r, i;
- irqreturn_t (*handler)(int irq, void *data);
-
- /* We've allocated all memory now, so let's do the irq allocations */
- irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
- for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
- hwirq = ctx->irqs.offset[r];
- for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
- if (r == 0 && i == 0)
- /*
- * The very first interrupt of range 0 is
- * always the PSL interrupt, but we only
- * need to connect a handler for guests,
- * because there's one PSL interrupt per
- * context.
- * On bare-metal, the PSL interrupt is
- * multiplexed and was set up when the AFU
- * was configured.
- */
- handler = cxl_ops->psl_interrupt;
- else
- handler = cxl_irq_afu;
- cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
- irq_name->name);
- irq_name = list_next_entry(irq_name, list);
- }
- }
-}
-
-int afu_register_irqs(struct cxl_context *ctx, u32 count)
-{
- int rc;
-
- rc = afu_allocate_irqs(ctx, count);
- if (rc)
- return rc;
-
- afu_register_hwirqs(ctx);
- return 0;
-}
-
-void afu_release_irqs(struct cxl_context *ctx, void *cookie)
-{
- irq_hw_number_t hwirq;
- unsigned int virq;
- int r, i;
-
- for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
- hwirq = ctx->irqs.offset[r];
- for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
- virq = irq_find_mapping(NULL, hwirq);
- if (virq)
- cxl_unmap_irq(virq, cookie);
- }
- }
-
- afu_irq_name_free(ctx);
- cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
-
- ctx->irq_count = 0;
-}
-
-void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr)
-{
- dev_crit(&afu->dev,
- "PSL Slice error received. Check AFU for root cause.\n");
- dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
- if (serr & CXL_PSL_SERR_An_afuto)
- dev_crit(&afu->dev, "AFU MMIO Timeout\n");
- if (serr & CXL_PSL_SERR_An_afudis)
- dev_crit(&afu->dev,
- "MMIO targeted Accelerator that was not enabled\n");
- if (serr & CXL_PSL_SERR_An_afuov)
- dev_crit(&afu->dev, "AFU CTAG Overflow\n");
- if (serr & CXL_PSL_SERR_An_badsrc)
- dev_crit(&afu->dev, "Bad Interrupt Source\n");
- if (serr & CXL_PSL_SERR_An_badctx)
- dev_crit(&afu->dev, "Bad Context Handle\n");
- if (serr & CXL_PSL_SERR_An_llcmdis)
- dev_crit(&afu->dev, "LLCMD to Disabled AFU\n");
- if (serr & CXL_PSL_SERR_An_llcmdto)
- dev_crit(&afu->dev, "LLCMD Timeout to AFU\n");
- if (serr & CXL_PSL_SERR_An_afupar)
- dev_crit(&afu->dev, "AFU MMIO Parity Error\n");
- if (serr & CXL_PSL_SERR_An_afudup)
- dev_crit(&afu->dev, "AFU MMIO Duplicate CTAG Error\n");
- if (serr & CXL_PSL_SERR_An_AE)
- dev_crit(&afu->dev,
- "AFU asserted JDONE with JERROR in AFU Directed Mode\n");
-}
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
deleted file mode 100644
index c1fbf6f588f7..000000000000
--- a/drivers/misc/cxl/main.c
+++ /dev/null
@@ -1,383 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-#include <linux/idr.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/sched/task.h>
-
-#include <asm/cputable.h>
-#include <asm/mmu.h>
-#include <misc/cxl-base.h>
-
-#include "cxl.h"
-#include "trace.h"
-
-static DEFINE_SPINLOCK(adapter_idr_lock);
-static DEFINE_IDR(cxl_adapter_idr);
-
-uint cxl_verbose;
-module_param_named(verbose, cxl_verbose, uint, 0600);
-MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
-
-const struct cxl_backend_ops *cxl_ops;
-
-int cxl_afu_slbia(struct cxl_afu *afu)
-{
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
-
- pr_devel("cxl_afu_slbia issuing SLBIA command\n");
- cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
- while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
- return -EBUSY;
- }
- /* If the adapter has gone down, we can assume that we
- * will PERST it and that will invalidate everything.
- */
- if (!cxl_ops->link_ok(afu->adapter, afu))
- return -EIO;
- cpu_relax();
- }
- return 0;
-}
-
-static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
-{
- unsigned long flags;
-
- if (ctx->mm != mm)
- return;
-
- pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__,
- ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);
-
- spin_lock_irqsave(&ctx->sste_lock, flags);
- trace_cxl_slbia(ctx);
- memset(ctx->sstp, 0, ctx->sst_size);
- spin_unlock_irqrestore(&ctx->sste_lock, flags);
- mb();
- cxl_afu_slbia(ctx->afu);
-}
-
-static inline void cxl_slbia_core(struct mm_struct *mm)
-{
- struct cxl *adapter;
- struct cxl_afu *afu;
- struct cxl_context *ctx;
- int card, slice, id;
-
- pr_devel("%s called\n", __func__);
-
- spin_lock(&adapter_idr_lock);
- idr_for_each_entry(&cxl_adapter_idr, adapter, card) {
- /* XXX: Make this lookup faster with link from mm to ctx */
- spin_lock(&adapter->afu_list_lock);
- for (slice = 0; slice < adapter->slices; slice++) {
- afu = adapter->afu[slice];
- if (!afu || !afu->enabled)
- continue;
- rcu_read_lock();
- idr_for_each_entry(&afu->contexts_idr, ctx, id)
- _cxl_slbia(ctx, mm);
- rcu_read_unlock();
- }
- spin_unlock(&adapter->afu_list_lock);
- }
- spin_unlock(&adapter_idr_lock);
-}
-
-static struct cxl_calls cxl_calls = {
- .cxl_slbia = cxl_slbia_core,
- .owner = THIS_MODULE,
-};
-
-int cxl_alloc_sst(struct cxl_context *ctx)
-{
- unsigned long vsid;
- u64 ea_mask, size, sstp0, sstp1;
-
- sstp0 = 0;
- sstp1 = 0;
-
- ctx->sst_size = PAGE_SIZE;
- ctx->sst_lru = 0;
- ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL);
- if (!ctx->sstp) {
- pr_err("cxl_alloc_sst: Unable to allocate segment table\n");
- return -ENOMEM;
- }
- pr_devel("SSTP allocated at 0x%p\n", ctx->sstp);
-
- vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12;
-
- sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT;
- sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50;
-
- size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT;
- if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) {
- WARN(1, "Impossible segment table size\n");
- return -EINVAL;
- }
- sstp0 |= size;
-
- if (mmu_kernel_ssize == MMU_SEGSIZE_256M)
- ea_mask = 0xfffff00ULL;
- else
- ea_mask = 0xffffffff00ULL;
-
- sstp0 |= vsid >> (50-14); /* Top 14 bits of VSID */
- sstp1 |= (vsid << (64-(50-14))) & ~ea_mask;
- sstp1 |= (u64)ctx->sstp & ea_mask;
- sstp1 |= CXL_SSTP1_An_V;
-
- pr_devel("Looked up %#llx: slbfee. %#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n",
- (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1);
-
- /* Store the calculated sstp hardware pointer values for use later */
- ctx->sstp0 = sstp0;
- ctx->sstp1 = sstp1;
-
- return 0;
-}
-
-/* print buffer content as integers when debugging */
-void cxl_dump_debug_buffer(void *buf, size_t buf_len)
-{
-#ifdef DEBUG
- int i, *ptr;
-
- /*
- * We want to regroup up to 4 integers per line, which means they
- * need to be in the same pr_devel() statement
- */
- ptr = (int *) buf;
- for (i = 0; i * 4 < buf_len; i += 4) {
- if ((i + 3) * 4 < buf_len)
- pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
- ptr[i + 2], ptr[i + 3]);
- else if ((i + 2) * 4 < buf_len)
- pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
- ptr[i + 2]);
- else if ((i + 1) * 4 < buf_len)
- pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]);
- else
- pr_devel("%.8x\n", ptr[i]);
- }
-#endif /* DEBUG */
-}
-
-/* Find a CXL adapter by its number and increase its refcount */
-struct cxl *get_cxl_adapter(int num)
-{
- struct cxl *adapter;
-
- spin_lock(&adapter_idr_lock);
- if ((adapter = idr_find(&cxl_adapter_idr, num)))
- get_device(&adapter->dev);
- spin_unlock(&adapter_idr_lock);
-
- return adapter;
-}
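Because get_cxl_adapter() takes a reference with get_device(), a hypothetical caller (not part of the deleted file) would be expected to balance it once done, for example:

	struct cxl *adapter = get_cxl_adapter(0);	/* adapter number 0, for illustration */

	if (adapter) {
		/* ... use the adapter ... */
		put_device(&adapter->dev);	/* drop the reference taken above */
	}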
-
-static int cxl_alloc_adapter_nr(struct cxl *adapter)
-{
- int i;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&adapter_idr_lock);
- i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT);
- spin_unlock(&adapter_idr_lock);
- idr_preload_end();
- if (i < 0)
- return i;
-
- adapter->adapter_num = i;
-
- return 0;
-}
-
-void cxl_remove_adapter_nr(struct cxl *adapter)
-{
- idr_remove(&cxl_adapter_idr, adapter->adapter_num);
-}
-
-struct cxl *cxl_alloc_adapter(void)
-{
- struct cxl *adapter;
-
- if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
- return NULL;
-
- spin_lock_init(&adapter->afu_list_lock);
-
- if (cxl_alloc_adapter_nr(adapter))
- goto err1;
-
- if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
- goto err2;
-
- /* start with context lock taken */
- atomic_set(&adapter->contexts_num, -1);
-
- return adapter;
-err2:
- cxl_remove_adapter_nr(adapter);
-err1:
- kfree(adapter);
- return NULL;
-}
-
-struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
-{
- struct cxl_afu *afu;
-
- if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
- return NULL;
-
- afu->adapter = adapter;
- afu->dev.parent = &adapter->dev;
- afu->dev.release = cxl_ops->release_afu;
- afu->slice = slice;
- idr_init(&afu->contexts_idr);
- mutex_init(&afu->contexts_lock);
- spin_lock_init(&afu->afu_cntl_lock);
- atomic_set(&afu->configured_state, -1);
- afu->prefault_mode = CXL_PREFAULT_NONE;
- afu->irqs_max = afu->adapter->user_irqs;
-
- return afu;
-}
-
-int cxl_afu_select_best_mode(struct cxl_afu *afu)
-{
- if (afu->modes_supported & CXL_MODE_DIRECTED)
- return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED);
-
- if (afu->modes_supported & CXL_MODE_DEDICATED)
- return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED);
-
- dev_warn(&afu->dev, "No supported programming modes available\n");
- /* We don't fail this so the user can inspect sysfs */
- return 0;
-}
-
-int cxl_adapter_context_get(struct cxl *adapter)
-{
- int rc;
-
- rc = atomic_inc_unless_negative(&adapter->contexts_num);
- return rc ? 0 : -EBUSY;
-}
-
-void cxl_adapter_context_put(struct cxl *adapter)
-{
- atomic_dec_if_positive(&adapter->contexts_num);
-}
-
-int cxl_adapter_context_lock(struct cxl *adapter)
-{
- int rc;
- /* no active contexts -> contexts_num == 0 */
- rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
- return rc ? -EBUSY : 0;
-}
-
-void cxl_adapter_context_unlock(struct cxl *adapter)
-{
- int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
-
- /*
- * contexts lock taken -> contexts_num == -1
- * If not true then show a warning and force reset the lock.
- * This will happen when context_unlock was requested without
- * doing a context_lock.
- */
- if (val != -1) {
- atomic_set(&adapter->contexts_num, 0);
- WARN(1, "Adapter context unlocked with %d active contexts",
- val);
- }
-}
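The four helpers above implement a small state machine on adapter->contexts_num: -1 means the context lock is held and new contexts are refused, 0 means unlocked with no active contexts, and a positive value counts attached contexts. A hedged usage sketch (hypothetical caller, not from the deleted file):

	/* Exclude new contexts while reconfiguring the adapter. */
	if (cxl_adapter_context_lock(adapter))	/* -EBUSY if any context is active */
		return -EBUSY;

	/* ... operation that must not race with a context attach ... */

	cxl_adapter_context_unlock(adapter);	/* back to 0: attaches may proceed again */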
-
-static int __init init_cxl(void)
-{
- int rc = 0;
-
- if (!tlbie_capable)
- return -EINVAL;
-
- if ((rc = cxl_file_init()))
- return rc;
-
- cxl_debugfs_init();
-
- /*
- * We don't register the callback on P9. The SLB callback is only
- * used for the PSL8 MMU and CX4.
- */
- if (cxl_is_power8()) {
- rc = register_cxl_calls(&cxl_calls);
- if (rc)
- goto err;
- }
-
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- cxl_ops = &cxl_native_ops;
- rc = pci_register_driver(&cxl_pci_driver);
- }
-#ifdef CONFIG_PPC_PSERIES
- else {
- cxl_ops = &cxl_guest_ops;
- rc = platform_driver_register(&cxl_of_driver);
- }
-#endif
- if (rc)
- goto err1;
-
- return 0;
-err1:
- if (cxl_is_power8())
- unregister_cxl_calls(&cxl_calls);
-err:
- cxl_debugfs_exit();
- cxl_file_exit();
-
- return rc;
-}
-
-static void exit_cxl(void)
-{
- if (cpu_has_feature(CPU_FTR_HVMODE))
- pci_unregister_driver(&cxl_pci_driver);
-#ifdef CONFIG_PPC_PSERIES
- else
- platform_driver_unregister(&cxl_of_driver);
-#endif
-
- cxl_debugfs_exit();
- cxl_file_exit();
- if (cxl_is_power8())
- unregister_cxl_calls(&cxl_calls);
- idr_destroy(&cxl_adapter_idr);
-}
-
-module_init(init_cxl);
-module_exit(exit_cxl);
-
-MODULE_DESCRIPTION("IBM Coherent Accelerator");
-MODULE_AUTHOR("Ian Munsie <imunsie@au1.ibm.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
deleted file mode 100644
index fbe16a6ab7ad..000000000000
--- a/drivers/misc/cxl/native.c
+++ /dev/null
@@ -1,1592 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/delay.h>
-#include <linux/irqdomain.h>
-#include <asm/synch.h>
-#include <asm/switch_to.h>
-#include <misc/cxl-base.h>
-
-#include "cxl.h"
-#include "trace.h"
-
-static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
- u64 result, u64 mask, bool enabled)
-{
- u64 AFU_Cntl;
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
- int rc = 0;
-
- spin_lock(&afu->afu_cntl_lock);
- pr_devel("AFU command starting: %llx\n", command);
-
- trace_cxl_afu_ctrl(afu, command);
-
- AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);
-
- AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- while ((AFU_Cntl & mask) != result) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
- rc = -EBUSY;
- goto out;
- }
-
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- afu->enabled = enabled;
- rc = -EIO;
- goto out;
- }
-
- pr_devel_ratelimited("AFU control... (0x%016llx)\n",
- AFU_Cntl | command);
- cpu_relax();
- AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- }
-
- if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
- /*
- * Workaround for a bug in the XSL used in the Mellanox CX4
- * that fails to clear the RA bit after an AFU reset,
- * preventing subsequent AFU resets from working.
- */
- cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
- }
-
- pr_devel("AFU command complete: %llx\n", command);
- afu->enabled = enabled;
-out:
- trace_cxl_afu_ctrl_done(afu, command, rc);
- spin_unlock(&afu->afu_cntl_lock);
-
- return rc;
-}
-
-static int afu_enable(struct cxl_afu *afu)
-{
- pr_devel("AFU enable request\n");
-
- return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
- CXL_AFU_Cntl_An_ES_Enabled,
- CXL_AFU_Cntl_An_ES_MASK, true);
-}
-
-int cxl_afu_disable(struct cxl_afu *afu)
-{
- pr_devel("AFU disable request\n");
-
- return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
- CXL_AFU_Cntl_An_ES_Disabled,
- CXL_AFU_Cntl_An_ES_MASK, false);
-}
-
-/* This will disable as well as reset */
-static int native_afu_reset(struct cxl_afu *afu)
-{
- int rc;
- u64 serr;
-
- pr_devel("AFU reset request\n");
-
- rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
- CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
- CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
- false);
-
- /*
- * Re-enable any masked interrupts when the AFU is not
- * activated to avoid side effects after attaching a process
- * in dedicated mode.
- */
- if (afu->current_mode == 0) {
- serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
- cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
- }
-
- return rc;
-}
-
-static int native_afu_check_and_enable(struct cxl_afu *afu)
-{
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- WARN(1, "Refusing to enable afu while link down!\n");
- return -EIO;
- }
- if (afu->enabled)
- return 0;
- return afu_enable(afu);
-}
-
-int cxl_psl_purge(struct cxl_afu *afu)
-{
- u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
- u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- u64 dsisr, dar;
- u64 start, end;
- u64 trans_fault = 0x0ULL;
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
- int rc = 0;
-
- trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);
-
- pr_devel("PSL purge request\n");
-
- if (cxl_is_power8())
- trans_fault = CXL_PSL_DSISR_TRANS;
- if (cxl_is_power9())
- trans_fault = CXL_PSL9_DSISR_An_TF;
-
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
- rc = -EIO;
- goto out;
- }
-
- if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
- WARN(1, "psl_purge request while AFU not disabled!\n");
- cxl_afu_disable(afu);
- }
-
- cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
- PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
- start = local_clock();
- PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
- while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
- == CXL_PSL_SCNTL_An_Ps_Pending) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
- rc = -EBUSY;
- goto out;
- }
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- rc = -EIO;
- goto out;
- }
-
- dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n",
- PSL_CNTL, dsisr);
-
- if (dsisr & trans_fault) {
- dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
- dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
- dsisr, dar);
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
- } else if (dsisr) {
- dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
- dsisr);
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
- } else {
- cpu_relax();
- }
- PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
- }
- end = local_clock();
- pr_devel("PSL purged in %lld ns\n", end - start);
-
- cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
- PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
-out:
- trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
- return rc;
-}
-
-static int spa_max_procs(int spa_size)
-{
- /*
- * From the CAIA:
- * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
- * Most of that junk is really just an overly-complicated way of saying
- * the last 256 bytes are __aligned(128), so it's really:
- * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
- * and
- * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
- * so
- * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
- * Ignore the alignment (which is safe in this case as long as we are
- * careful with our rounding) and solve for n:
- */
- return ((spa_size / 8) - 96) / 17;
-}
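Working the CAIA expression quoted in the comment through explicitly (ignoring the 128-byte alignment term, as the comment suggests):

	sizeof(SPA) = ((n + 4) * 128) + (n * 8) + 256
	            = 136 * n + 768
	=>        n = (spa_size - 768) / 136
	            = ((spa_size / 8) - 96) / 17

which is exactly the expression spa_max_procs() returns.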
-
-static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
-{
- unsigned spa_size;
-
- /* Work out how many pages to allocate */
- afu->native->spa_order = -1;
- do {
- afu->native->spa_order++;
- spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
-
- if (spa_size > 0x100000) {
- dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
- afu->native->spa_max_procs, afu->native->spa_size);
- if (mode != CXL_MODE_DEDICATED)
- afu->num_procs = afu->native->spa_max_procs;
- break;
- }
-
- afu->native->spa_size = spa_size;
- afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
- } while (afu->native->spa_max_procs < afu->num_procs);
-
- if (!(afu->native->spa = (struct cxl_process_element *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
- pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
- return -ENOMEM;
- }
- pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
- 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);
-
- return 0;
-}
-
-static void attach_spa(struct cxl_afu *afu)
-{
- u64 spap;
-
- afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
- ((afu->native->spa_max_procs + 3) * 128));
-
- spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
- spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
- spap |= CXL_PSL_SPAP_V;
- pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
- afu->native->spa, afu->native->spa_max_procs,
- afu->native->sw_command_status, spap);
- cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
-}
-
-void cxl_release_spa(struct cxl_afu *afu)
-{
- if (afu->native->spa) {
- free_pages((unsigned long) afu->native->spa,
- afu->native->spa_order);
- afu->native->spa = NULL;
- }
-}
-
-/*
- * Invalidation of all ERAT entries is no longer required by CAIA2. Use
- * only for debug.
- */
-int cxl_invalidate_all_psl9(struct cxl *adapter)
-{
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
- u64 ierat;
-
- pr_devel("CXL adapter - invalidation of all ERAT entries\n");
-
- /* Invalidates all ERAT entries for Radix or HPT */
- ierat = CXL_XSL9_IERAT_IALL;
- if (radix_enabled())
- ierat |= CXL_XSL9_IERAT_INVR;
- cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);
-
- while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&adapter->dev,
- "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
- return -EBUSY;
- }
- if (!cxl_ops->link_ok(adapter, NULL))
- return -EIO;
- cpu_relax();
- }
- return 0;
-}
-
-int cxl_invalidate_all_psl8(struct cxl *adapter)
-{
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
-
- pr_devel("CXL adapter wide TLBIA & SLBIA\n");
-
- cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);
-
- cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
- while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
- return -EBUSY;
- }
- if (!cxl_ops->link_ok(adapter, NULL))
- return -EIO;
- cpu_relax();
- }
-
- cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
- while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
- return -EBUSY;
- }
- if (!cxl_ops->link_ok(adapter, NULL))
- return -EIO;
- cpu_relax();
- }
- return 0;
-}
-
-int cxl_data_cache_flush(struct cxl *adapter)
-{
- u64 reg;
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
-
- /*
- * Do a data cache flush only if the data cache is available.
- * On PSL9D the data cache is absent, so the flush operation
- * would time out.
- */
- if (adapter->native->no_data_cache) {
- pr_devel("No PSL data cache. Ignoring cache flush req.\n");
- return 0;
- }
-
- pr_devel("Flushing data cache\n");
- reg = cxl_p1_read(adapter, CXL_PSL_Control);
- reg |= CXL_PSL_Control_Fr;
- cxl_p1_write(adapter, CXL_PSL_Control, reg);
-
- reg = cxl_p1_read(adapter, CXL_PSL_Control);
- while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
- return -EBUSY;
- }
-
- if (!cxl_ops->link_ok(adapter, NULL)) {
- dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
- return -EIO;
- }
- cpu_relax();
- reg = cxl_p1_read(adapter, CXL_PSL_Control);
- }
-
- reg &= ~CXL_PSL_Control_Fr;
- cxl_p1_write(adapter, CXL_PSL_Control, reg);
- return 0;
-}
-
-static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
-{
- int rc;
-
- /* 1. Disable SSTP by writing 0 to SSTP1[V] */
- cxl_p2n_write(afu, CXL_SSTP1_An, 0);
-
- /* 2. Invalidate all SLB entries */
- if ((rc = cxl_afu_slbia(afu)))
- return rc;
-
- /* 3. Set SSTP0_An */
- cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);
-
- /* 4. Set SSTP1_An */
- cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);
-
- return 0;
-}
-
-/* Using per slice version may improve performance here. (ie. SLBIA_An) */
-static void slb_invalid(struct cxl_context *ctx)
-{
- struct cxl *adapter = ctx->afu->adapter;
- u64 slbia;
-
- WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));
-
- cxl_p1_write(adapter, CXL_PSL_LBISEL,
- ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
- be32_to_cpu(ctx->elem->lpid));
- cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
-
- while (1) {
- if (!cxl_ops->link_ok(adapter, NULL))
- break;
- slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
- if (!(slbia & CXL_TLB_SLB_P))
- break;
- cpu_relax();
- }
-}
-
-static int do_process_element_cmd(struct cxl_context *ctx,
- u64 cmd, u64 pe_state)
-{
- u64 state;
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
- int rc = 0;
-
- trace_cxl_llcmd(ctx, cmd);
-
- WARN_ON(!ctx->afu->enabled);
-
- ctx->elem->software_state = cpu_to_be32(pe_state);
- smp_wmb();
- *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
- smp_mb();
- cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
- while (1) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
- rc = -EBUSY;
- goto out;
- }
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
- dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
- rc = -EIO;
- goto out;
- }
- state = be64_to_cpup(ctx->afu->native->sw_command_status);
- if (state == ~0ULL) {
- pr_err("cxl: Error adding process element to AFU\n");
- rc = -1;
- goto out;
- }
- if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
- (cmd | (cmd >> 16) | ctx->pe))
- break;
- /*
- * The command won't finish in the PSL if there are
- * outstanding DSIs. Hence we need to yield here in
- * case there are outstanding DSIs that we need to
- * service. Tuning possibility: we could wait for a
- * while before scheduling.
- */
- schedule();
-
- }
-out:
- trace_cxl_llcmd_done(ctx, cmd, rc);
- return rc;
-}
-
-static int add_process_element(struct cxl_context *ctx)
-{
- int rc = 0;
-
- mutex_lock(&ctx->afu->native->spa_mutex);
- pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
- if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
- ctx->pe_inserted = true;
- pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->native->spa_mutex);
- return rc;
-}
-
-static int terminate_process_element(struct cxl_context *ctx)
-{
- int rc = 0;
-
- /* fast path terminate if it's already invalid */
- if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
- return rc;
-
- mutex_lock(&ctx->afu->native->spa_mutex);
- pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
- /* We could be asked to terminate when the hw is down. That
- * should always succeed: it's not running if the hw has gone
- * away and is being reset.
- */
- if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
- CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
- ctx->elem->software_state = 0; /* Remove Valid bit */
- pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->native->spa_mutex);
- return rc;
-}
-
-static int remove_process_element(struct cxl_context *ctx)
-{
- int rc = 0;
-
- mutex_lock(&ctx->afu->native->spa_mutex);
- pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
-
- /* We could be asked to remove when the hw is down. Again, if
- * the hw is down, the PE is gone, so we succeed.
- */
- if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
- rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
-
- if (!rc)
- ctx->pe_inserted = false;
- if (cxl_is_power8())
- slb_invalid(ctx);
- pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->native->spa_mutex);
-
- return rc;
-}
-
-void cxl_assign_psn_space(struct cxl_context *ctx)
-{
- if (!ctx->afu->pp_size || ctx->master) {
- ctx->psn_phys = ctx->afu->psn_phys;
- ctx->psn_size = ctx->afu->adapter->ps_size;
- } else {
- ctx->psn_phys = ctx->afu->psn_phys +
- (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
- ctx->psn_size = ctx->afu->pp_size;
- }
-}
-
-static int activate_afu_directed(struct cxl_afu *afu)
-{
- int rc;
-
- dev_info(&afu->dev, "Activating AFU directed mode\n");
-
- afu->num_procs = afu->max_procs_virtualised;
- if (afu->native->spa == NULL) {
- if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
- return -ENOMEM;
- }
- attach_spa(afu);
-
- cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
- if (cxl_is_power8())
- cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
- cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
-
- afu->current_mode = CXL_MODE_DIRECTED;
-
- if ((rc = cxl_chardev_m_afu_add(afu)))
- return rc;
-
- if ((rc = cxl_sysfs_afu_m_add(afu)))
- goto err;
-
- if ((rc = cxl_chardev_s_afu_add(afu)))
- goto err1;
-
- return 0;
-err1:
- cxl_sysfs_afu_m_remove(afu);
-err:
- cxl_chardev_afu_remove(afu);
- return rc;
-}
-
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
-#else
-#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
-#endif
-
-u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
-{
- u64 sr = 0;
-
- set_endian(sr);
- if (master)
- sr |= CXL_PSL_SR_An_MP;
- if (mfspr(SPRN_LPCR) & LPCR_TC)
- sr |= CXL_PSL_SR_An_TC;
-
- if (kernel) {
- if (!real_mode)
- sr |= CXL_PSL_SR_An_R;
- sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
- } else {
- sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
- if (radix_enabled())
- sr |= CXL_PSL_SR_An_HV;
- else
- sr &= ~(CXL_PSL_SR_An_HV);
- if (!test_tsk_thread_flag(current, TIF_32BIT))
- sr |= CXL_PSL_SR_An_SF;
- }
- if (p9) {
- if (radix_enabled())
- sr |= CXL_PSL_SR_An_XLAT_ror;
- else
- sr |= CXL_PSL_SR_An_XLAT_hpt;
- }
- return sr;
-}
-
-static u64 calculate_sr(struct cxl_context *ctx)
-{
- return cxl_calculate_sr(ctx->master, ctx->kernel, false,
- cxl_is_power9());
-}
-
-static void update_ivtes_directed(struct cxl_context *ctx)
-{
- bool need_update = (ctx->status == STARTED);
- int r;
-
- if (need_update) {
- WARN_ON(terminate_process_element(ctx));
- WARN_ON(remove_process_element(ctx));
- }
-
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
- ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
- }
-
- /*
- * Theoretically we could use the update llcmd, instead of a
- * terminate/remove/add (or if an atomic update was required we could
- * do a suspend/update/resume), however it seems there might be issues
- * with the update llcmd on some cards (including those using an XSL on
- * an ASIC) so for now it's safest to go with the commands that are
- * known to work. In the future if we come across a situation where the
- * card may be performing transactions using the same PE while we are
- * doing this update we might need to revisit this.
- */
- if (need_update)
- WARN_ON(add_process_element(ctx));
-}
-
-static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- u32 pid;
- int rc;
-
- cxl_assign_psn_space(ctx);
-
- ctx->elem->ctxtime = 0; /* disable */
- ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
- ctx->elem->haurp = 0; /* disable */
-
- if (ctx->kernel)
- pid = 0;
- else {
- if (ctx->mm == NULL) {
- pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
- __func__, ctx->pe, pid_nr(ctx->pid));
- return -EINVAL;
- }
- pid = ctx->mm->context.id;
- }
-
- /* Assign a unique TIDR (thread id) for the current thread */
- if (!(ctx->tidr) && (ctx->assign_tidr)) {
- rc = set_thread_tidr(current);
- if (rc)
- return -ENODEV;
- ctx->tidr = current->thread.tidr;
- pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr);
- }
-
- ctx->elem->common.tid = cpu_to_be32(ctx->tidr);
- ctx->elem->common.pid = cpu_to_be32(pid);
-
- ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
-
- ctx->elem->common.csrp = 0; /* disable */
-
- cxl_prefault(ctx, wed);
-
- /*
- * Ensure we have the multiplexed PSL interrupt set up to take faults
- * for kernel contexts that may not have allocated any AFU IRQs at all:
- */
- if (ctx->irqs.range[0] == 0) {
- ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
- ctx->irqs.range[0] = 1;
- }
-
- ctx->elem->common.amr = cpu_to_be64(amr);
- ctx->elem->common.wed = cpu_to_be64(wed);
-
- return 0;
-}
-
-int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- int result;
-
- /* fill the process element entry */
- result = process_element_entry_psl9(ctx, wed, amr);
- if (result)
- return result;
-
- update_ivtes_directed(ctx);
-
- /* first guy needs to enable */
- result = cxl_ops->afu_check_and_enable(ctx->afu);
- if (result)
- return result;
-
- return add_process_element(ctx);
-}
-
-int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- u32 pid;
- int result;
-
- cxl_assign_psn_space(ctx);
-
- ctx->elem->ctxtime = 0; /* disable */
- ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
- ctx->elem->haurp = 0; /* disable */
- ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));
-
- pid = current->pid;
- if (ctx->kernel)
- pid = 0;
- ctx->elem->common.tid = 0;
- ctx->elem->common.pid = cpu_to_be32(pid);
-
- ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
-
- ctx->elem->common.csrp = 0; /* disable */
- ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
- ctx->elem->common.u.psl8.aurp1 = 0; /* disable */
-
- cxl_prefault(ctx, wed);
-
- ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
- ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);
-
- /*
- * Ensure we have the multiplexed PSL interrupt set up to take faults
- * for kernel contexts that may not have allocated any AFU IRQs at all:
- */
- if (ctx->irqs.range[0] == 0) {
- ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
- ctx->irqs.range[0] = 1;
- }
-
- update_ivtes_directed(ctx);
-
- ctx->elem->common.amr = cpu_to_be64(amr);
- ctx->elem->common.wed = cpu_to_be64(wed);
-
- /* first guy needs to enable */
- if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
- return result;
-
- return add_process_element(ctx);
-}
-
-static int deactivate_afu_directed(struct cxl_afu *afu)
-{
- dev_info(&afu->dev, "Deactivating AFU directed mode\n");
-
- afu->current_mode = 0;
- afu->num_procs = 0;
-
- cxl_sysfs_afu_m_remove(afu);
- cxl_chardev_afu_remove(afu);
-
- /*
- * The CAIA section 2.2.1 indicates that the procedure for starting and
- * stopping an AFU in AFU directed mode is AFU specific, which is not
- * ideal since this code is generic and with one exception has no
- * knowledge of the AFU. This is in contrast to the procedure for
- * disabling a dedicated process AFU, which is documented to just
- * require a reset. The architecture does indicate that both an AFU
- * reset and an AFU disable should result in the AFU being disabled and
- * we do both followed by a PSL purge for safety.
- *
- * Notably we used to have some issues with the disable sequence on PSL
- * cards, which is why we ended up using this heavyweight procedure in
- * the first place. However, a bug was discovered that had rendered the
- * disable operation ineffective, so it is conceivable that was the
- * sole explanation for those difficulties. Careful regression testing
- * is recommended if anyone attempts to remove or reorder these
- * operations.
- *
- * The XSL on the Mellanox CX4 behaves a little differently from the
- * PSL based cards and will time out an AFU reset if the AFU is still
- * enabled. That card is special in that we do have a means to identify
- * it from this code, so in that case we skip the reset and just use a
- * disable/purge to avoid the timeout and corresponding noise in the
- * kernel log.
- */
- if (afu->adapter->native->sl_ops->needs_reset_before_disable)
- cxl_ops->afu_reset(afu);
- cxl_afu_disable(afu);
- cxl_psl_purge(afu);
-
- return 0;
-}
-
-int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
-{
- dev_info(&afu->dev, "Activating dedicated process mode\n");
-
- /*
- * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
- * XSL and AFU are programmed to work with a single context.
- * The context information should be configured in the SPA area
- * index 0 (so PSL_SPAP must be configured before enabling the
- * AFU).
- */
- afu->num_procs = 1;
- if (afu->native->spa == NULL) {
- if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
- return -ENOMEM;
- }
- attach_spa(afu);
-
- cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
- cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
-
- afu->current_mode = CXL_MODE_DEDICATED;
-
- return cxl_chardev_d_afu_add(afu);
-}
-
-int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
-{
- dev_info(&afu->dev, "Activating dedicated process mode\n");
-
- cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
-
- cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
- cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */
- cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
- cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
- cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */
- cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));
-
- cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */
- cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */
- cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */
-
- afu->current_mode = CXL_MODE_DEDICATED;
- afu->num_procs = 1;
-
- return cxl_chardev_d_afu_add(afu);
-}
-
-void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
-{
- int r;
-
- for (r = 0; r < CXL_IRQ_RANGES; r++) {
- ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
- ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
- }
-}
-
-void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
-{
- struct cxl_afu *afu = ctx->afu;
-
- cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
- (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
- (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
- (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
- ((u64)ctx->irqs.offset[3] & 0xffff));
- cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
- (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
- (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
- (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
- ((u64)ctx->irqs.range[3] & 0xffff));
-}
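/*
 * Editor's note: a minimal, standalone userspace sketch of the IVTE packing
 * performed by cxl_update_dedicated_ivtes_psl8() above: four 16-bit values
 * are packed into one 64-bit register image, entry 0 in the most significant
 * halfword. Illustrative only, not part of the original driver; the helper
 * name pack_ivte16() is hypothetical.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t pack_ivte16(const uint16_t v[4])
{
	return ((uint64_t)(v[0] & 0xffff) << 48) |
	       ((uint64_t)(v[1] & 0xffff) << 32) |
	       ((uint64_t)(v[2] & 0xffff) << 16) |
	       ((uint64_t)(v[3] & 0xffff));
}

int main(void)
{
	const uint16_t offsets[4] = { 0x0001, 0x0010, 0x0100, 0x1000 };

	/* Entry 0 lands in bits 63:48, entry 3 in bits 15:0. */
	assert(pack_ivte16(offsets) == 0x0001001001001000ULL);
	return 0;
}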
-
-int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- struct cxl_afu *afu = ctx->afu;
- int result;
-
- /* fill the process element entry */
- result = process_element_entry_psl9(ctx, wed, amr);
- if (result)
- return result;
-
- if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
- afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
-
- ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
- /*
- * Ideally we should do a wmb() here to make sure the changes to the
- * PE are visible to the card before we call afu_enable.
- * On ppc64, though, all MMIOs are preceded by a 'sync' instruction, hence
- * we don't need one here.
- */
-
- result = cxl_ops->afu_reset(afu);
- if (result)
- return result;
-
- return afu_enable(afu);
-}
-
-int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
-{
- struct cxl_afu *afu = ctx->afu;
- u64 pid;
- int rc;
-
- pid = (u64)current->pid << 32;
- if (ctx->kernel)
- pid = 0;
- cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);
-
- cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));
-
- if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
- return rc;
-
- cxl_prefault(ctx, wed);
-
- if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
- afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
-
- cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
-
- /* master only context for dedicated */
- cxl_assign_psn_space(ctx);
-
- if ((rc = cxl_ops->afu_reset(afu)))
- return rc;
-
- cxl_p2n_write(afu, CXL_PSL_WED_An, wed);
-
- return afu_enable(afu);
-}
-
-static int deactivate_dedicated_process(struct cxl_afu *afu)
-{
- dev_info(&afu->dev, "Deactivating dedicated process mode\n");
-
- afu->current_mode = 0;
- afu->num_procs = 0;
-
- cxl_chardev_afu_remove(afu);
-
- return 0;
-}
-
-static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
-{
- if (mode == CXL_MODE_DIRECTED)
- return deactivate_afu_directed(afu);
- if (mode == CXL_MODE_DEDICATED)
- return deactivate_dedicated_process(afu);
- return 0;
-}
-
-static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
-{
- if (!mode)
- return 0;
- if (!(mode & afu->modes_supported))
- return -EINVAL;
-
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- WARN(1, "Device link is down, refusing to activate!\n");
- return -EIO;
- }
-
- if (mode == CXL_MODE_DIRECTED)
- return activate_afu_directed(afu);
- if ((mode == CXL_MODE_DEDICATED) &&
- (afu->adapter->native->sl_ops->activate_dedicated_process))
- return afu->adapter->native->sl_ops->activate_dedicated_process(afu);
-
- return -EINVAL;
-}
-
-static int native_attach_process(struct cxl_context *ctx, bool kernel,
- u64 wed, u64 amr)
-{
- if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
- WARN(1, "Device link is down, refusing to attach process!\n");
- return -EIO;
- }
-
- ctx->kernel = kernel;
- if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
- (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
- return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);
-
- if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
- (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
- return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);
-
- return -EINVAL;
-}
-
-static inline int detach_process_native_dedicated(struct cxl_context *ctx)
-{
- /*
- * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
- * stop the AFU in dedicated mode (we therefore do not make that
- * optional like we do in the afu directed path). It does not indicate
- * that we need to do an explicit disable (which should occur
- * implicitly as part of the reset) or purge, but we do these as well
- * to be on the safe side.
- *
- * Notably we used to have some issues with the disable sequence
- * (before the sequence was spelled out in the architecture) which is
- * why we were so heavyweight in the first place. However, a bug was
- * discovered that had rendered the disable operation ineffective, so
- * it is conceivable that was the sole explanation for those
- * difficulties. Point is, we should be careful and do some regression
- * testing if we ever attempt to remove any part of this procedure.
- */
- cxl_ops->afu_reset(ctx->afu);
- cxl_afu_disable(ctx->afu);
- cxl_psl_purge(ctx->afu);
- return 0;
-}
-
-static void native_update_ivtes(struct cxl_context *ctx)
-{
- if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
- return update_ivtes_directed(ctx);
- if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
- (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
- return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
- WARN(1, "native_update_ivtes: Bad mode\n");
-}
-
-static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
-{
- if (!ctx->pe_inserted)
- return 0;
- if (terminate_process_element(ctx))
- return -1;
- if (remove_process_element(ctx))
- return -1;
-
- return 0;
-}
-
-static int native_detach_process(struct cxl_context *ctx)
-{
- trace_cxl_detach(ctx);
-
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
- return detach_process_native_dedicated(ctx);
-
- return detach_process_native_afu_directed(ctx);
-}
-
-static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
-{
- /* If the adapter has gone away, we can't get any meaningful
- * information.
- */
- if (!cxl_ops->link_ok(afu->adapter, afu))
- return -EIO;
-
- info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
- if (cxl_is_power8())
- info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
- info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
- info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- info->proc_handle = 0;
-
- return 0;
-}
-
-void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
-{
- u64 fir1, serr;
-
- fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);
-
- dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
- if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
- serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
- cxl_afu_decode_psl_serr(ctx->afu, serr);
- }
-}
-
-void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
-{
- u64 fir1, fir2, fir_slice, serr, afu_debug;
-
- fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
- fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
- fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
- afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
-
- dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
- dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
- if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
- serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
- cxl_afu_decode_psl_serr(ctx->afu, serr);
- }
- dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
- dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
-}
-
-static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
- u64 dsisr, u64 errstat)
-{
-
- dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
-
- if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
- ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);
-
- if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
- dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
- ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
- }
-
- return cxl_ops->ack_irq(ctx, 0, errstat);
-}
-
-static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
-{
- if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
- return true;
-
- if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
- return true;
-
- return false;
-}
-
-irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
-{
- if (cxl_is_translation_fault(afu, irq_info->dsisr))
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
- else
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t native_irq_multiplexed(int irq, void *data)
-{
- struct cxl_afu *afu = data;
- struct cxl_context *ctx;
- struct cxl_irq_info irq_info;
- u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
- int ph, ret = IRQ_HANDLED, res;
-
- /* check if eeh kicked in while the interrupt was in flight */
- if (unlikely(phreg == ~0ULL)) {
- dev_warn(&afu->dev,
- "Ignoring slice interrupt(%d) due to fenced card",
- irq);
- return IRQ_HANDLED;
- }
- /* Mask the pe-handle from register value */
- ph = phreg & 0xffff;
- if ((res = native_get_irq_info(afu, &irq_info))) {
- WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
- if (afu->adapter->native->sl_ops->fail_irq)
- return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
- return ret;
- }
-
- rcu_read_lock();
- ctx = idr_find(&afu->contexts_idr, ph);
- if (ctx) {
- if (afu->adapter->native->sl_ops->handle_interrupt)
- ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
- rcu_read_unlock();
- return ret;
- }
- rcu_read_unlock();
-
- WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
- " %016llx\n(Possible AFU HW issue - was a term/remove acked"
- " with outstanding transactions?)\n", ph, irq_info.dsisr,
- irq_info.dar);
- if (afu->adapter->native->sl_ops->fail_irq)
- ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
- return ret;
-}
-
-static void native_irq_wait(struct cxl_context *ctx)
-{
- u64 dsisr;
- int timeout = 1000;
- int ph;
-
- /*
- * Wait until no further interrupts are presented by the PSL
- * for this context.
- */
- while (timeout--) {
- ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
- if (ph != ctx->pe)
- return;
- dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
- if (cxl_is_power8() &&
- ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
- return;
- if (cxl_is_power9() &&
- ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
- return;
- /*
- * We are waiting for the workqueue to process our
- * irq, so need to let that run here.
- */
- msleep(1);
- }
-
- dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
- " DSISR %016llx!\n", ph, dsisr);
- return;
-}
-
-static irqreturn_t native_slice_irq_err(int irq, void *data)
-{
- struct cxl_afu *afu = data;
- u64 errstat, serr, afu_error, dsisr;
- u64 fir_slice, afu_debug, irq_mask;
-
- /*
- * slice err interrupt is only used with full PSL (no XSL)
- */
- serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
- dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- cxl_afu_decode_psl_serr(afu, serr);
-
- if (cxl_is_power8()) {
- fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
- afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
- dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
- dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
- }
- dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
- dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
- dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
-
- /* mask off the IRQ so it won't retrigger until the AFU is reset */
- irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
- serr |= irq_mask;
- cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
- dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");
-
- return IRQ_HANDLED;
-}
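/*
 * Editor's note: the masking at the end of native_slice_irq_err() above
 * reads as if the SERR register pairs each interrupt status bit in its
 * upper half with a mask bit 32 positions lower. A standalone sketch of
 * that pattern; the field SERR_IRQ_STATUS_BITS and the sample value are
 * made-up assumptions for illustration, not the real register layout.
 */
#include <assert.h>
#include <stdint.h>

#define SERR_IRQ_STATUS_BITS 0x000000ff00000000ULL	/* hypothetical field */

int main(void)
{
	uint64_t serr = 0x0000002100000000ULL;	/* two status bits asserted */
	uint64_t irq_mask = (serr & SERR_IRQ_STATUS_BITS) >> 32;

	serr |= irq_mask;	/* set the matching mask bits in the lower half */
	assert(serr == 0x0000002100000021ULL);
	return 0;
}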
-
-void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
-{
- u64 fir1;
-
- fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
- dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
-}
-
-void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
-{
- u64 fir1, fir2;
-
- fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
- fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
- dev_crit(&adapter->dev,
- "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
- fir1, fir2);
-}
-
-static irqreturn_t native_irq_err(int irq, void *data)
-{
- struct cxl *adapter = data;
- u64 err_ivte;
-
- WARN(1, "CXL ERROR interrupt %i\n", irq);
-
- err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
- dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
-
- if (adapter->native->sl_ops->debugfs_stop_trace) {
- dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
- adapter->native->sl_ops->debugfs_stop_trace(adapter);
- }
-
- if (adapter->native->sl_ops->err_irq_dump_registers)
- adapter->native->sl_ops->err_irq_dump_registers(adapter);
-
- return IRQ_HANDLED;
-}
-
-int cxl_native_register_psl_err_irq(struct cxl *adapter)
-{
- int rc;
-
- adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
- dev_name(&adapter->dev));
- if (!adapter->irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
- &adapter->native->err_hwirq,
- &adapter->native->err_virq,
- adapter->irq_name))) {
- kfree(adapter->irq_name);
- adapter->irq_name = NULL;
- return rc;
- }
-
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);
-
- return 0;
-}
-
-void cxl_native_release_psl_err_irq(struct cxl *adapter)
-{
- if (adapter->native->err_virq == 0 ||
- adapter->native->err_virq !=
- irq_find_mapping(NULL, adapter->native->err_hwirq))
- return;
-
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
- cxl_unmap_irq(adapter->native->err_virq, adapter);
- cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
- kfree(adapter->irq_name);
- adapter->native->err_virq = 0;
-}
-
-int cxl_native_register_serr_irq(struct cxl_afu *afu)
-{
- u64 serr;
- int rc;
-
- afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
- dev_name(&afu->dev));
- if (!afu->err_irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
- &afu->serr_hwirq,
- &afu->serr_virq, afu->err_irq_name))) {
- kfree(afu->err_irq_name);
- afu->err_irq_name = NULL;
- return rc;
- }
-
- serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- if (cxl_is_power8())
- serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
- if (cxl_is_power9()) {
- /*
- * By default, all errors are masked. So don't set all masks.
- * Slice errors will be transferred.
- */
- serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
- }
- cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
-
- return 0;
-}
-
-void cxl_native_release_serr_irq(struct cxl_afu *afu)
-{
- if (afu->serr_virq == 0 ||
- afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
- return;
-
- cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
- cxl_unmap_irq(afu->serr_virq, afu);
- cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
- kfree(afu->err_irq_name);
- afu->serr_virq = 0;
-}
-
-int cxl_native_register_psl_irq(struct cxl_afu *afu)
-{
- int rc;
-
- afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
- dev_name(&afu->dev));
- if (!afu->psl_irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
- afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
- afu->psl_irq_name))) {
- kfree(afu->psl_irq_name);
- afu->psl_irq_name = NULL;
- }
- return rc;
-}
-
-void cxl_native_release_psl_irq(struct cxl_afu *afu)
-{
- if (afu->native->psl_virq == 0 ||
- afu->native->psl_virq !=
- irq_find_mapping(NULL, afu->native->psl_hwirq))
- return;
-
- cxl_unmap_irq(afu->native->psl_virq, afu);
- cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
- kfree(afu->psl_irq_name);
- afu->native->psl_virq = 0;
-}
-
-static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
-{
- u64 dsisr;
-
- pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);
-
- /* Clear PSL_DSISR[PE] */
- dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);
-
- /* Write 1s to clear error status bits */
- cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
-}
-
-static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
-{
- trace_cxl_psl_irq_ack(ctx, tfc);
- if (tfc)
- cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
- if (psl_reset_mask)
- recover_psl_err(ctx->afu, psl_reset_mask);
-
- return 0;
-}
-
-int cxl_check_error(struct cxl_afu *afu)
-{
- return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
-}
-
-static bool native_support_attributes(const char *attr_name,
- enum cxl_attrs type)
-{
- return true;
-}
-
-static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
-{
- if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
- return -EIO;
- if (unlikely(off >= afu->crs_len))
- return -ERANGE;
- *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
- (cr * afu->crs_len) + off);
- return 0;
-}
-
-static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
-{
- if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
- return -EIO;
- if (unlikely(off >= afu->crs_len))
- return -ERANGE;
- *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
- (cr * afu->crs_len) + off);
- return 0;
-}
-
-static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val;
- int rc;
-
- rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
- if (!rc)
- *out = (val >> ((off & 0x3) * 8)) & 0xffff;
- return rc;
-}
-
-static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val;
- int rc;
-
- rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
- if (!rc)
- *out = (val >> ((off & 0x3) * 8)) & 0xff;
- return rc;
-}
-
-static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
-{
- if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
- return -EIO;
- if (unlikely(off >= afu->crs_len))
- return -ERANGE;
- out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
- (cr * afu->crs_len) + off, in);
- return 0;
-}
-
-static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val32, mask, shift;
- int rc;
-
- rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
- if (rc)
- return rc;
- shift = (off & 0x3) * 8;
- WARN_ON(shift == 24);
- mask = 0xffff << shift;
- val32 = (val32 & ~mask) | (in << shift);
-
- rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
- return rc;
-}
-
-static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val32, mask, shift;
- int rc;
-
- rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
- if (rc)
- return rc;
- shift = (off & 0x3) * 8;
- mask = 0xff << shift;
- val32 = (val32 & ~mask) | (in << shift);
-
- rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
- return rc;
-}
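/*
 * Editor's note: a standalone sketch of the read-modify-write scheme used
 * by the 8/16-bit config-record helpers above, which only have aligned
 * 32-bit accessors to work with. Illustrative only; cr_read16() and
 * cr_write8() are hypothetical stand-ins operating on a plain variable
 * instead of the little-endian MMIO window.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t word;	/* stands in for the aligned 32-bit record word */

static uint16_t cr_read16(uint64_t off)
{
	/* Extract the halfword at byte offset 'off' within the word. */
	return (word >> ((off & 0x3) * 8)) & 0xffff;
}

static void cr_write8(uint64_t off, uint8_t in)
{
	uint32_t shift = (off & 0x3) * 8;
	uint32_t mask = 0xffu << shift;

	/* Read the whole word, replace one byte, write it back. */
	word = (word & ~mask) | ((uint32_t)in << shift);
}

int main(void)
{
	word = 0x11223344;
	assert(cr_read16(0) == 0x3344);	/* low halfword of the record word */
	cr_write8(3, 0xaa);		/* patch the top byte only */
	assert(word == 0xaa223344);
	return 0;
}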
-
-const struct cxl_backend_ops cxl_native_ops = {
- .module = THIS_MODULE,
- .adapter_reset = cxl_pci_reset,
- .alloc_one_irq = cxl_pci_alloc_one_irq,
- .release_one_irq = cxl_pci_release_one_irq,
- .alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
- .release_irq_ranges = cxl_pci_release_irq_ranges,
- .setup_irq = cxl_pci_setup_irq,
- .handle_psl_slice_error = native_handle_psl_slice_error,
- .psl_interrupt = NULL,
- .ack_irq = native_ack_irq,
- .irq_wait = native_irq_wait,
- .attach_process = native_attach_process,
- .detach_process = native_detach_process,
- .update_ivtes = native_update_ivtes,
- .support_attributes = native_support_attributes,
- .link_ok = cxl_adapter_link_ok,
- .release_afu = cxl_pci_release_afu,
- .afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
- .afu_check_and_enable = native_afu_check_and_enable,
- .afu_activate_mode = native_afu_activate_mode,
- .afu_deactivate_mode = native_afu_deactivate_mode,
- .afu_reset = native_afu_reset,
- .afu_cr_read8 = native_afu_cr_read8,
- .afu_cr_read16 = native_afu_cr_read16,
- .afu_cr_read32 = native_afu_cr_read32,
- .afu_cr_read64 = native_afu_cr_read64,
- .afu_cr_write8 = native_afu_cr_write8,
- .afu_cr_write16 = native_afu_cr_write16,
- .afu_cr_write32 = native_afu_cr_write32,
- .read_adapter_vpd = cxl_pci_read_adapter_vpd,
-};
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
deleted file mode 100644
index e26ee85279fa..000000000000
--- a/drivers/misc/cxl/of.c
+++ /dev/null
@@ -1,346 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include "cxl.h"
-
-static int read_phys_addr(struct device_node *np, char *prop_name,
- struct cxl_afu *afu)
-{
- int i, len, entry_size, naddr, nsize, type;
- u64 addr, size;
- const __be32 *prop;
-
- naddr = of_n_addr_cells(np);
- nsize = of_n_size_cells(np);
-
- prop = of_get_property(np, prop_name, &len);
- if (prop) {
- entry_size = naddr + nsize;
- for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) {
- type = be32_to_cpu(prop[0]);
- addr = of_read_number(prop, naddr);
- size = of_read_number(&prop[naddr], nsize);
- switch (type) {
- case 0: /* unit address */
- afu->guest->handle = addr;
- break;
- case 1: /* p2 area */
- afu->guest->p2n_phys += addr;
- afu->guest->p2n_size = size;
- break;
- case 2: /* problem state area */
- afu->psn_phys += addr;
- afu->adapter->ps_size = size;
- break;
- default:
- pr_err("Invalid address type %d found in %s property of AFU\n",
- type, prop_name);
- return -EINVAL;
- }
- }
- }
- return 0;
-}
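/*
 * Editor's note: a standalone sketch of how read_phys_addr() above walks a
 * cells-encoded property: each entry is #address-cells + #size-cells
 * big-endian 32-bit cells, and the first cell doubles as the address type.
 * read_cells() is a hypothetical stand-in for of_read_number(), and the
 * sample property values are made up for illustration.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t read_cells(const uint32_t *cells, int n)
{
	uint64_t v = 0;

	/* of_read_number() additionally converts each cell from big-endian;
	 * the sample cells below are kept in host order so the sketch stays
	 * host-independent. */
	while (n--)
		v = (v << 32) | *cells++;
	return v;
}

int main(void)
{
	/* One entry with naddr = 2, nsize = 2: first cell = address type 1
	 * ("p2 area" in the driver), then address 0x20000000, size 0x1000. */
	const uint32_t prop[] = { 0x1, 0x20000000, 0x0, 0x1000 };
	const int naddr = 2, nsize = 2;

	uint64_t addr = read_cells(&prop[0], naddr);
	uint64_t size = read_cells(&prop[naddr], nsize);

	assert(prop[0] == 1);			/* entry type */
	assert(addr == 0x0000000120000000ULL);	/* type cell folds into addr */
	assert(size == 0x1000);
	return 0;
}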
-
-static int read_vpd(struct cxl *adapter, struct cxl_afu *afu)
-{
- char vpd[256];
- int rc;
- size_t len = sizeof(vpd);
-
- memset(vpd, 0, len);
-
- if (adapter)
- rc = cxl_guest_read_adapter_vpd(adapter, vpd, len);
- else
- rc = cxl_guest_read_afu_vpd(afu, vpd, len);
-
- if (rc > 0) {
- cxl_dump_debug_buffer(vpd, rc);
- rc = 0;
- }
- return rc;
-}
-
-int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np)
-{
- return of_property_read_reg(afu_np, 0, &afu->guest->handle, NULL);
-}
-
-int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
-{
- int i, rc;
- u16 device_id, vendor_id;
- u32 val = 0, class_code;
-
- /* Properties are read in the same order as listed in PAPR */
-
- rc = read_phys_addr(np, "reg", afu);
- if (rc)
- return rc;
-
- rc = read_phys_addr(np, "assigned-addresses", afu);
- if (rc)
- return rc;
-
- if (afu->psn_phys == 0)
- afu->psa = false;
- else
- afu->psa = true;
-
- of_property_read_u32(np, "ibm,#processes", &afu->max_procs_virtualised);
-
- if (cxl_verbose)
- read_vpd(NULL, afu);
-
- of_property_read_u32(np, "ibm,max-ints-per-process", &afu->guest->max_ints);
- afu->irqs_max = afu->guest->max_ints;
-
- if (!of_property_read_u32(np, "ibm,min-ints-per-process", &afu->pp_irqs)) {
- /* One extra interrupt for the PSL interrupt is already
- * included. Remove it now to keep only AFU interrupts and
- * match the native case.
- */
- afu->pp_irqs--;
- }
-
- of_property_read_u64(np, "ibm,error-buffer-size", &afu->eb_len);
- afu->eb_offset = 0;
-
- of_property_read_u64(np, "ibm,config-record-size", &afu->crs_len);
- afu->crs_offset = 0;
-
- of_property_read_u32(np, "ibm,#config-records", &afu->crs_num);
-
- if (cxl_verbose) {
- for (i = 0; i < afu->crs_num; i++) {
- rc = cxl_ops->afu_cr_read16(afu, i, PCI_DEVICE_ID,
- &device_id);
- if (!rc)
- pr_info("record %d - device-id: %#x\n",
- i, device_id);
- rc = cxl_ops->afu_cr_read16(afu, i, PCI_VENDOR_ID,
- &vendor_id);
- if (!rc)
- pr_info("record %d - vendor-id: %#x\n",
- i, vendor_id);
- rc = cxl_ops->afu_cr_read32(afu, i, PCI_CLASS_REVISION,
- &class_code);
- if (!rc) {
- class_code >>= 8;
- pr_info("record %d - class-code: %#x\n",
- i, class_code);
- }
- }
- }
- /*
- * if "ibm,process-mmio" doesn't exist then per-process mmio is
- * not supported
- */
- val = 0;
- if (!of_property_read_u32(np, "ibm,process-mmio", &val) && val == 1)
- afu->pp_psa = true;
- else
- afu->pp_psa = false;
-
- if (!of_property_read_u32(np, "ibm,function-error-interrupt", &val))
- afu->serr_hwirq = val;
-
- pr_devel("AFU handle: %#llx\n", afu->guest->handle);
- pr_devel("p2n_phys: %#llx (size %#llx)\n",
- afu->guest->p2n_phys, afu->guest->p2n_size);
- pr_devel("psn_phys: %#llx (size %#llx)\n",
- afu->psn_phys, afu->adapter->ps_size);
- pr_devel("Max number of processes virtualised=%i\n",
- afu->max_procs_virtualised);
- pr_devel("Per-process irqs min=%i, max=%i\n", afu->pp_irqs,
- afu->irqs_max);
- pr_devel("Slice error interrupt=%#lx\n", afu->serr_hwirq);
-
- return 0;
-}
-
-static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np)
-{
- const __be32 *ranges;
- int len, nranges, i;
- struct irq_avail *cur;
-
- ranges = of_get_property(np, "interrupt-ranges", &len);
- if (ranges == NULL || len < (2 * sizeof(int)))
- return -EINVAL;
-
- /*
- * encoded array of two cells per entry, each cell encoded as
- * with encode-int
- */
- nranges = len / (2 * sizeof(int));
- if (nranges == 0 || (nranges * 2 * sizeof(int)) != len)
- return -EINVAL;
-
- adapter->guest->irq_avail = kcalloc(nranges, sizeof(struct irq_avail),
- GFP_KERNEL);
- if (adapter->guest->irq_avail == NULL)
- return -ENOMEM;
-
- adapter->guest->irq_base_offset = be32_to_cpu(ranges[0]);
- for (i = 0; i < nranges; i++) {
- cur = &adapter->guest->irq_avail[i];
- cur->offset = be32_to_cpu(ranges[i * 2]);
- cur->range = be32_to_cpu(ranges[i * 2 + 1]);
- cur->bitmap = bitmap_zalloc(cur->range, GFP_KERNEL);
- if (cur->bitmap == NULL)
- goto err;
- if (cur->offset < adapter->guest->irq_base_offset)
- adapter->guest->irq_base_offset = cur->offset;
- if (cxl_verbose)
- pr_info("available IRQ range: %#lx-%#lx (%lu)\n",
- cur->offset, cur->offset + cur->range - 1,
- cur->range);
- }
- adapter->guest->irq_nranges = nranges;
- spin_lock_init(&adapter->guest->irq_alloc_lock);
-
- return 0;
-err:
- for (i--; i >= 0; i--) {
- cur = &adapter->guest->irq_avail[i];
- bitmap_free(cur->bitmap);
- }
- kfree(adapter->guest->irq_avail);
- adapter->guest->irq_avail = NULL;
- return -ENOMEM;
-}
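/*
 * Editor's note: a small standalone sketch of the "interrupt-ranges" layout
 * consumed by read_adapter_irq_config() above: a flat array of
 * (offset, range) cell pairs, with the smallest offset becoming the IRQ
 * base offset. Sample values are made up and kept in host order.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
	const uint32_t ranges[] = { 0x120, 16, 0x100, 8 };	/* two pairs */
	size_t len = sizeof(ranges);
	size_t nranges = len / (2 * sizeof(uint32_t));
	uint32_t base = ranges[0];
	unsigned int total = 0;

	/* Reject a property whose length is not a whole number of pairs. */
	assert(nranges && nranges * 2 * sizeof(uint32_t) == len);

	for (size_t i = 0; i < nranges; i++) {
		uint32_t offset = ranges[i * 2];
		uint32_t range = ranges[i * 2 + 1];

		if (offset < base)
			base = offset;
		total += range;
	}

	assert(nranges == 2);
	assert(base == 0x100);	/* smallest offset wins */
	assert(total == 24);	/* 16 + 8 interrupts available */
	return 0;
}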
-
-int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np)
-{
- return of_property_read_reg(np, 0, &adapter->guest->handle, NULL);
-}
-
-int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np)
-{
- int rc;
- const char *p;
- u32 val = 0;
-
- /* Properties are read in the same order as listed in PAPR */
-
- if ((rc = read_adapter_irq_config(adapter, np)))
- return rc;
-
- if (!of_property_read_u32(np, "ibm,caia-version", &val)) {
- adapter->caia_major = (val & 0xFF00) >> 8;
- adapter->caia_minor = val & 0xFF;
- }
-
- if (!of_property_read_u32(np, "ibm,psl-revision", &val))
- adapter->psl_rev = val;
-
- if (!of_property_read_string(np, "status", &p)) {
- adapter->guest->status = kasprintf(GFP_KERNEL, "%s", p);
- if (adapter->guest->status == NULL)
- return -ENOMEM;
- }
-
- if (!of_property_read_u32(np, "vendor-id", &val))
- adapter->guest->vendor = val;
-
- if (!of_property_read_u32(np, "device-id", &val))
- adapter->guest->device = val;
-
- if (!of_property_read_u32(np, "subsystem-vendor-id", &val))
- adapter->guest->subsystem_vendor = val;
-
- if (!of_property_read_u32(np, "subsystem-id", &val))
- adapter->guest->subsystem = val;
-
- if (cxl_verbose)
- read_vpd(adapter, NULL);
-
- return 0;
-}
-
-static void cxl_of_remove(struct platform_device *pdev)
-{
- struct cxl *adapter;
- int afu;
-
- adapter = dev_get_drvdata(&pdev->dev);
- for (afu = 0; afu < adapter->slices; afu++)
- cxl_guest_remove_afu(adapter->afu[afu]);
-
- cxl_guest_remove_adapter(adapter);
-}
-
-static void cxl_of_shutdown(struct platform_device *pdev)
-{
- cxl_of_remove(pdev);
-}
-
-int cxl_of_probe(struct platform_device *pdev)
-{
- struct device_node *np = NULL;
- struct device_node *afu_np = NULL;
- struct cxl *adapter = NULL;
- int ret;
- int slice = 0, slice_ok = 0;
-
- dev_err_once(&pdev->dev, "DEPRECATION: cxl is deprecated and will be removed in a future kernel release\n");
-
- pr_devel("in %s\n", __func__);
-
- np = pdev->dev.of_node;
- if (np == NULL)
- return -ENODEV;
-
- /* init adapter */
- adapter = cxl_guest_init_adapter(np, pdev);
- if (IS_ERR(adapter)) {
- dev_err(&pdev->dev, "guest_init_adapter failed: %li\n", PTR_ERR(adapter));
- return PTR_ERR(adapter);
- }
-
- /* init afu */
- for_each_child_of_node(np, afu_np) {
- if ((ret = cxl_guest_init_afu(adapter, slice, afu_np)))
- dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n",
- slice, ret);
- else
- slice_ok++;
- slice++;
- }
-
- if (slice_ok == 0) {
- dev_info(&pdev->dev, "No active AFU");
- adapter->slices = 0;
- }
-
- return 0;
-}
-
-static const struct of_device_id cxl_of_match[] = {
- { .compatible = "ibm,coherent-platform-facility",},
- {},
-};
-MODULE_DEVICE_TABLE(of, cxl_of_match);
-
-struct platform_driver cxl_of_driver = {
- .driver = {
- .name = "cxl_of",
- .of_match_table = cxl_of_match,
- .owner = THIS_MODULE
- },
- .probe = cxl_of_probe,
- .remove = cxl_of_remove,
- .shutdown = cxl_of_shutdown,
-};
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
deleted file mode 100644
index 92bf7c5c7b35..000000000000
--- a/drivers/misc/cxl/pci.c
+++ /dev/null
@@ -1,2103 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/pci_regs.h>
-#include <linux/pci_ids.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sort.h>
-#include <linux/pci.h>
-#include <linux/of.h>
-#include <linux/delay.h>
-#include <asm/opal.h>
-#include <asm/msi_bitmap.h>
-#include <asm/pnv-pci.h>
-#include <asm/io.h>
-#include <asm/reg.h>
-
-#include "cxl.h"
-#include <misc/cxl.h>
-
-
-#define CXL_PCI_VSEC_ID 0x1280
-#define CXL_VSEC_MIN_SIZE 0x80
-
-#define CXL_READ_VSEC_LENGTH(dev, vsec, dest) \
- { \
- pci_read_config_word(dev, vsec + 0x6, dest); \
- *dest >>= 4; \
- }
-#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0x8, dest)
-
-#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0x9, dest)
-#define CXL_STATUS_SECOND_PORT 0x80
-#define CXL_STATUS_MSI_X_FULL 0x40
-#define CXL_STATUS_MSI_X_SINGLE 0x20
-#define CXL_STATUS_FLASH_RW 0x08
-#define CXL_STATUS_FLASH_RO 0x04
-#define CXL_STATUS_LOADABLE_AFU 0x02
-#define CXL_STATUS_LOADABLE_PSL 0x01
-/* If we see these features we won't try to use the card */
-#define CXL_UNSUPPORTED_FEATURES \
- (CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)
-
-#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0xa, dest)
-#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
- pci_write_config_byte(dev, vsec + 0xa, val)
-#define CXL_VSEC_PROTOCOL_MASK 0xe0
-#define CXL_VSEC_PROTOCOL_1024TB 0x80
-#define CXL_VSEC_PROTOCOL_512TB 0x40
-#define CXL_VSEC_PROTOCOL_256TB 0x20 /* Power 8/9 uses this */
-#define CXL_VSEC_PROTOCOL_ENABLE 0x01
-
-#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
- pci_read_config_word(dev, vsec + 0xc, dest)
-#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0xe, dest)
-#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0xf, dest)
-#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
- pci_read_config_word(dev, vsec + 0x10, dest)
-
-#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
- pci_read_config_byte(dev, vsec + 0x13, dest)
-#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
- pci_write_config_byte(dev, vsec + 0x13, val)
-#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
-#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
-#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */
-
-#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
- pci_read_config_dword(dev, vsec + 0x20, dest)
-#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
- pci_read_config_dword(dev, vsec + 0x24, dest)
-#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
- pci_read_config_dword(dev, vsec + 0x28, dest)
-#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
- pci_read_config_dword(dev, vsec + 0x2c, dest)
-
-
-/* This works a little differently from the p1/p2 register accesses, to make it
- * easier to pull out individual fields */
-#define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off)
-#define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off)
-#define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit)))
-#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
-
-#define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0)
-#define AFUD_NUM_INTS_PER_PROC(val) EXTRACT_PPC_BITS(val, 0, 15)
-#define AFUD_NUM_PROCS(val) EXTRACT_PPC_BITS(val, 16, 31)
-#define AFUD_NUM_CRS(val) EXTRACT_PPC_BITS(val, 32, 47)
-#define AFUD_MULTIMODE(val) EXTRACT_PPC_BIT(val, 48)
-#define AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55)
-#define AFUD_DEDICATED_PROCESS(val) EXTRACT_PPC_BIT(val, 59)
-#define AFUD_AFU_DIRECTED(val) EXTRACT_PPC_BIT(val, 61)
-#define AFUD_TIME_SLICED(val) EXTRACT_PPC_BIT(val, 63)
-#define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20)
-#define AFUD_CR_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
-#define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28)
-#define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30)
-#define AFUD_PPPSA_PP(val) EXTRACT_PPC_BIT(val, 6)
-#define AFUD_PPPSA_PSA(val) EXTRACT_PPC_BIT(val, 7)
-#define AFUD_PPPSA_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
-#define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38)
-#define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40)
-#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
-#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
-
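/*
 * Editor's note: the EXTRACT_PPC_BIT()/EXTRACT_PPC_BITS() helpers above use
 * IBM/Power bit numbering, where bit 0 is the most significant bit of the
 * 64-bit doubleword. A standalone sketch of that convention, with local
 * definitions of PPC_BIT()/PPC_BITMASK()/PPC_BITLSHIFT() assumed to mirror
 * the kernel macros; the sample value is made up for illustration.
 */
#include <assert.h>
#include <stdint.h>

#define PPC_BIT(bit)		(1ULL << (63 - (bit)))
#define PPC_BITLSHIFT(be)	(63 - (be))
#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
#define EXTRACT_PPC_BITS(val, bs, be) \
	(((val) & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))

int main(void)
{
	/* Modelled on AFUD_READ_INFO(): IBM bits 0:15 hold the interrupts
	 * per process, 16:31 the number of processes, 32:47 the CR count. */
	uint64_t info = 0x0004001000020000ULL;

	assert(EXTRACT_PPC_BITS(info, 0, 15) == 0x0004);	/* top halfword */
	assert(EXTRACT_PPC_BITS(info, 16, 31) == 0x0010);
	assert(EXTRACT_PPC_BITS(info, 32, 47) == 0x0002);
	return 0;
}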
-static const struct pci_device_id cxl_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0623), },
- { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0628), },
- { }
-};
-MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
-
-
-/*
- * Mostly using these wrappers to avoid confusion:
- * priv 1 is BAR2, while priv 2 is BAR0
- */
-static inline resource_size_t p1_base(struct pci_dev *dev)
-{
- return pci_resource_start(dev, 2);
-}
-
-static inline resource_size_t p1_size(struct pci_dev *dev)
-{
- return pci_resource_len(dev, 2);
-}
-
-static inline resource_size_t p2_base(struct pci_dev *dev)
-{
- return pci_resource_start(dev, 0);
-}
-
-static inline resource_size_t p2_size(struct pci_dev *dev)
-{
- return pci_resource_len(dev, 0);
-}
-
-static int find_cxl_vsec(struct pci_dev *dev)
-{
- return pci_find_vsec_capability(dev, PCI_VENDOR_ID_IBM, CXL_PCI_VSEC_ID);
-}
-
-static void dump_cxl_config_space(struct pci_dev *dev)
-{
- int vsec;
- u32 val;
-
- dev_info(&dev->dev, "dump_cxl_config_space\n");
-
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
- dev_info(&dev->dev, "BAR0: %#.8x\n", val);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
- dev_info(&dev->dev, "BAR1: %#.8x\n", val);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
- dev_info(&dev->dev, "BAR2: %#.8x\n", val);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
- dev_info(&dev->dev, "BAR3: %#.8x\n", val);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
- dev_info(&dev->dev, "BAR4: %#.8x\n", val);
- pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
- dev_info(&dev->dev, "BAR5: %#.8x\n", val);
-
- dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
- p1_base(dev), p1_size(dev));
- dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
- p2_base(dev), p2_size(dev));
- dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
- pci_resource_start(dev, 4), pci_resource_len(dev, 4));
-
- if (!(vsec = find_cxl_vsec(dev)))
- return;
-
-#define show_reg(name, what) \
- dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)
-
- pci_read_config_dword(dev, vsec + 0x0, &val);
- show_reg("Cap ID", (val >> 0) & 0xffff);
- show_reg("Cap Ver", (val >> 16) & 0xf);
- show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
- pci_read_config_dword(dev, vsec + 0x4, &val);
- show_reg("VSEC ID", (val >> 0) & 0xffff);
- show_reg("VSEC Rev", (val >> 16) & 0xf);
- show_reg("VSEC Length", (val >> 20) & 0xfff);
- pci_read_config_dword(dev, vsec + 0x8, &val);
- show_reg("Num AFUs", (val >> 0) & 0xff);
- show_reg("Status", (val >> 8) & 0xff);
- show_reg("Mode Control", (val >> 16) & 0xff);
- show_reg("Reserved", (val >> 24) & 0xff);
- pci_read_config_dword(dev, vsec + 0xc, &val);
- show_reg("PSL Rev", (val >> 0) & 0xffff);
- show_reg("CAIA Ver", (val >> 16) & 0xffff);
- pci_read_config_dword(dev, vsec + 0x10, &val);
- show_reg("Base Image Rev", (val >> 0) & 0xffff);
- show_reg("Reserved", (val >> 16) & 0x0fff);
- show_reg("Image Control", (val >> 28) & 0x3);
- show_reg("Reserved", (val >> 30) & 0x1);
- show_reg("Image Loaded", (val >> 31) & 0x1);
-
- pci_read_config_dword(dev, vsec + 0x14, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x18, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x1c, &val);
- show_reg("Reserved", val);
-
- pci_read_config_dword(dev, vsec + 0x20, &val);
- show_reg("AFU Descriptor Offset", val);
- pci_read_config_dword(dev, vsec + 0x24, &val);
- show_reg("AFU Descriptor Size", val);
- pci_read_config_dword(dev, vsec + 0x28, &val);
- show_reg("Problem State Offset", val);
- pci_read_config_dword(dev, vsec + 0x2c, &val);
- show_reg("Problem State Size", val);
-
- pci_read_config_dword(dev, vsec + 0x30, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x34, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x38, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x3c, &val);
- show_reg("Reserved", val);
-
- pci_read_config_dword(dev, vsec + 0x40, &val);
- show_reg("PSL Programming Port", val);
- pci_read_config_dword(dev, vsec + 0x44, &val);
- show_reg("PSL Programming Control", val);
-
- pci_read_config_dword(dev, vsec + 0x48, &val);
- show_reg("Reserved", val);
- pci_read_config_dword(dev, vsec + 0x4c, &val);
- show_reg("Reserved", val);
-
- pci_read_config_dword(dev, vsec + 0x50, &val);
- show_reg("Flash Address Register", val);
- pci_read_config_dword(dev, vsec + 0x54, &val);
- show_reg("Flash Size Register", val);
- pci_read_config_dword(dev, vsec + 0x58, &val);
- show_reg("Flash Status/Control Register", val);
- pci_read_config_dword(dev, vsec + 0x58, &val);
- show_reg("Flash Data Port", val);
-
-#undef show_reg
-}
-
-static void dump_afu_descriptor(struct cxl_afu *afu)
-{
- u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
- int i;
-
-#define show_reg(name, what) \
- dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)
-
- val = AFUD_READ_INFO(afu);
- show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
- show_reg("num_of_processes", AFUD_NUM_PROCS(val));
- show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
- show_reg("req_prog_mode", val & 0xffffULL);
- afu_cr_num = AFUD_NUM_CRS(val);
-
- val = AFUD_READ(afu, 0x8);
- show_reg("Reserved", val);
- val = AFUD_READ(afu, 0x10);
- show_reg("Reserved", val);
- val = AFUD_READ(afu, 0x18);
- show_reg("Reserved", val);
-
- val = AFUD_READ_CR(afu);
- show_reg("Reserved", (val >> (63-7)) & 0xff);
- show_reg("AFU_CR_len", AFUD_CR_LEN(val));
- afu_cr_len = AFUD_CR_LEN(val) * 256;
-
- val = AFUD_READ_CR_OFF(afu);
- afu_cr_off = val;
- show_reg("AFU_CR_offset", val);
-
- val = AFUD_READ_PPPSA(afu);
- show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
- show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));
-
- val = AFUD_READ_PPPSA_OFF(afu);
- show_reg("PerProcessPSA_offset", val);
-
- val = AFUD_READ_EB(afu);
- show_reg("Reserved", (val >> (63-7)) & 0xff);
- show_reg("AFU_EB_len", AFUD_EB_LEN(val));
-
- val = AFUD_READ_EB_OFF(afu);
- show_reg("AFU_EB_offset", val);
-
- for (i = 0; i < afu_cr_num; i++) {
- val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
- show_reg("CR Vendor", val & 0xffff);
- show_reg("CR Device", (val >> 16) & 0xffff);
- }
-#undef show_reg
-}
-
-#define P8_CAPP_UNIT0_ID 0xBA
-#define P8_CAPP_UNIT1_ID 0XBE
-#define P9_CAPP_UNIT0_ID 0xC0
-#define P9_CAPP_UNIT1_ID 0xE0
-
-static int get_phb_index(struct device_node *np, u32 *phb_index)
-{
- if (of_property_read_u32(np, "ibm,phb-index", phb_index))
- return -ENODEV;
- return 0;
-}
-
-static u64 get_capp_unit_id(struct device_node *np, u32 phb_index)
-{
- /*
- * POWER 8:
- * - For chips other than POWER8NVL, we only have CAPP 0,
- * irrespective of which PHB is used.
- * - For POWER8NVL, assume CAPP 0 is attached to PHB0 and
- * CAPP 1 is attached to PHB1.
- */
- if (cxl_is_power8()) {
- if (!pvr_version_is(PVR_POWER8NVL))
- return P8_CAPP_UNIT0_ID;
-
- if (phb_index == 0)
- return P8_CAPP_UNIT0_ID;
-
- if (phb_index == 1)
- return P8_CAPP_UNIT1_ID;
- }
-
- /*
- * POWER 9:
- * PEC0 (PHB0). Capp ID = CAPP0 (0b1100_0000)
- * PEC1 (PHB1 - PHB2). No capi mode
- * PEC2 (PHB3 - PHB4 - PHB5): Capi mode on PHB3 only. Capp ID = CAPP1 (0b1110_0000)
- */
- if (cxl_is_power9()) {
- if (phb_index == 0)
- return P9_CAPP_UNIT0_ID;
-
- if (phb_index == 3)
- return P9_CAPP_UNIT1_ID;
- }
-
- return 0;
-}
-
-int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
- u32 *phb_index, u64 *capp_unit_id)
-{
- int rc;
- struct device_node *np;
- u32 id;
-
- if (!(np = pnv_pci_get_phb_node(dev)))
- return -ENODEV;
-
- while (np && of_property_read_u32(np, "ibm,chip-id", &id))
- np = of_get_next_parent(np);
- if (!np)
- return -ENODEV;
-
- *chipid = id;
-
- rc = get_phb_index(np, phb_index);
- if (rc) {
- pr_err("cxl: invalid phb index\n");
- of_node_put(np);
- return rc;
- }
-
- *capp_unit_id = get_capp_unit_id(np, *phb_index);
- of_node_put(np);
- if (!*capp_unit_id) {
- pr_err("cxl: No capp unit found for PHB[%lld,%d]. Make sure the adapter is on a capi-compatible slot\n",
- *chipid, *phb_index);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static DEFINE_MUTEX(indications_mutex);
-
-static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind,
- u64 *nbwind)
-{
- static u32 val[3];
- struct device_node *np;
-
- mutex_lock(&indications_mutex);
- if (!val[0]) {
- if (!(np = pnv_pci_get_phb_node(dev))) {
- mutex_unlock(&indications_mutex);
- return -ENODEV;
- }
-
- if (of_property_read_u32_array(np, "ibm,phb-indications", val, 3)) {
- val[2] = 0x0300UL; /* legacy values */
- val[1] = 0x0400UL;
- val[0] = 0x0200UL;
- }
- of_node_put(np);
- }
- *capiind = val[0];
- *asnind = val[1];
- *nbwind = val[2];
- mutex_unlock(&indications_mutex);
- return 0;
-}
-
-int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg)
-{
- u64 xsl_dsnctl;
- u64 capiind, asnind, nbwind;
-
- /*
- * CAPI Identifier bits [0:7]
- * bit 61:60 MSI bits --> 0
- * bit 59 TVT selector --> 0
- */
- if (get_phb_indications(dev, &capiind, &asnind, &nbwind))
- return -ENODEV;
-
- /*
- * Tell XSL where to route data to.
- * The field chipid should match the PHB CAPI_CMPM register
- */
- xsl_dsnctl = (capiind << (63-15)); /* Bit 57 */
- xsl_dsnctl |= (capp_unit_id << (63-15));
-
- /* nMMU_ID defaults to b'000001001' */
- xsl_dsnctl |= ((u64)0x09 << (63-28));
-
- /*
- * Used to identify CAPI packets which should be sorted into
- * the Non-Blocking queues by the PHB. This field should match
- * the PHB PBL_NBW_CMPM register
- * nbwind=0x03, bits [57:58], must include capi indicator.
- * Not supported on P9 DD1.
- */
- xsl_dsnctl |= (nbwind << (63-55));
-
- /*
- * Upper 16b address bits of ASB_Notify messages sent to the
- * system. Need to match the PHB's ASN Compare/Mask Register.
- * Not supported on P9 DD1.
- */
- xsl_dsnctl |= asnind;
-
- *reg = xsl_dsnctl;
- return 0;
-}
-
-static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
- struct pci_dev *dev)
-{
- u64 xsl_dsnctl, psl_fircntl;
- u64 chipid;
- u32 phb_index;
- u64 capp_unit_id;
- u64 psl_debug;
- int rc;
-
- rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
- if (rc)
- return rc;
-
- rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &xsl_dsnctl);
- if (rc)
- return rc;
-
- cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl);
-
- /* Set fir_cntl to recommended value for production env */
- psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
- psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
- psl_fircntl |= 0x1ULL; /* ce_thresh */
- cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);
-
- /* Set up the PSL to transmit packets on the PCIe before the
- * CAPP is enabled. Make sure that CAPP virtual machines are disabled.
- */
- cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000012A10ULL);
-
- /*
- * A response to an ASB_Notify request is returned by the
- * system as an MMIO write to the address defined in
- * the PSL_TNR_ADDR register.
- * keep the Reset Value: 0x00020000E0000000
- */
-
- /* Enable XSL rty limit */
- cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL);
-
- /* Change XSL_INV dummy read threshold */
- cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL);
-
- if (phb_index == 3) {
- /* disable machines 31-47 and 20-27 for DMA */
- cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL);
- }
-
- /* Snoop machines */
- cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);
-
- /* Enable NORST and DD2 features */
- cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL);
-
- /*
- * Check if the PSL has a data cache. We need to flush the adapter's
- * data cache when it is about to be removed.
- */
- psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG);
- if (psl_debug & CXL_PSL_DEBUG_CDC) {
- dev_dbg(&dev->dev, "No data-cache present\n");
- adapter->native->no_data_cache = true;
- }
-
- return 0;
-}
-
-static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev)
-{
- u64 psl_dsnctl, psl_fircntl;
- u64 chipid;
- u32 phb_index;
- u64 capp_unit_id;
- int rc;
-
- rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
- if (rc)
- return rc;
-
- psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
- psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
- /* Tell PSL where to route data to */
- psl_dsnctl |= (chipid << (63-5));
- psl_dsnctl |= (capp_unit_id << (63-13));
-
- cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
- cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
- /* snoop write mask */
- cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
- /* set fir_cntl to recommended value for production env */
- psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
- psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
- psl_fircntl |= 0x1ULL; /* ce_thresh */
- cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
- /* for debugging with trace arrays */
- cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
-
- return 0;
-}
-
-/* PSL */
-#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
-#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
-/* For the PSL this is a multiple for 0 < n <= 7: */
-#define PSL_2048_250MHZ_CYCLES 1
-
-static void write_timebase_ctrl_psl8(struct cxl *adapter)
-{
- cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
- TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
-}
-
-static u64 timebase_read_psl9(struct cxl *adapter)
-{
- return cxl_p1_read(adapter, CXL_PSL9_Timebase);
-}
-
-static u64 timebase_read_psl8(struct cxl *adapter)
-{
- return cxl_p1_read(adapter, CXL_PSL_Timebase);
-}
-
-static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
-{
- struct device_node *np;
-
- adapter->psl_timebase_synced = false;
-
- if (!(np = pnv_pci_get_phb_node(dev)))
- return;
-
- /* Do not fail when CAPP timebase sync is not supported by OPAL */
- of_node_get(np);
- if (!of_property_present(np, "ibm,capp-timebase-sync")) {
- of_node_put(np);
- dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
- return;
- }
- of_node_put(np);
-
- /*
- * Setup PSL Timebase Control and Status register
- * with the recommended Timebase Sync Count value
- */
- if (adapter->native->sl_ops->write_timebase_ctrl)
- adapter->native->sl_ops->write_timebase_ctrl(adapter);
-
- /* Enable PSL Timebase */
- cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
- cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);
-
- return;
-}
-
-static int init_implementation_afu_regs_psl9(struct cxl_afu *afu)
-{
- return 0;
-}
-
-static int init_implementation_afu_regs_psl8(struct cxl_afu *afu)
-{
- /* read/write masks for this slice */
- cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
- /* APC read/write masks for this slice */
- cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
- /* for debugging with trace arrays */
- cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
- cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);
-
- return 0;
-}
-
-int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
- unsigned int virq)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
-}
-
-int cxl_update_image_control(struct cxl *adapter)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
- int rc;
- int vsec;
- u8 image_state;
-
- if (!(vsec = find_cxl_vsec(dev))) {
- dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
- return -ENODEV;
- }
-
- if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
- dev_err(&dev->dev, "failed to read image state: %i\n", rc);
- return rc;
- }
-
- if (adapter->perst_loads_image)
- image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
- else
- image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
-
- if (adapter->perst_select_user)
- image_state |= CXL_VSEC_PERST_SELECT_USER;
- else
- image_state &= ~CXL_VSEC_PERST_SELECT_USER;
-
- if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
- dev_err(&dev->dev, "failed to update image control: %i\n", rc);
- return rc;
- }
-
- return 0;
-}
-
-int cxl_pci_alloc_one_irq(struct cxl *adapter)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- return pnv_cxl_alloc_hwirqs(dev, 1);
-}
-
-void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- return pnv_cxl_release_hwirqs(dev, hwirq, 1);
-}
-
-int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
- struct cxl *adapter, unsigned int num)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
-}
-
-void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
- struct cxl *adapter)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
- pnv_cxl_release_hwirq_ranges(irqs, dev);
-}
-
-static int setup_cxl_bars(struct pci_dev *dev)
-{
- /* Safety check in case we get backported to < 3.17 without M64 */
- if ((p1_base(dev) < 0x100000000ULL) ||
- (p2_base(dev) < 0x100000000ULL)) {
- dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
- return -ENODEV;
- }
-
- /*
- * BAR 4/5 has a special meaning for CXL and must be programmed with a
- * special value corresponding to the CXL protocol address range.
- * For POWER 8/9 that means bits 48:49 must be set to 10
- */
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);
-
- return 0;
-}
-
-/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
-static int switch_card_to_cxl(struct pci_dev *dev)
-{
- int vsec;
- u8 val;
- int rc;
-
- dev_info(&dev->dev, "switch card to CXL\n");
-
- if (!(vsec = find_cxl_vsec(dev))) {
- dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
- return -ENODEV;
- }
-
- if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
- dev_err(&dev->dev, "failed to read current mode control: %i", rc);
- return rc;
- }
- val &= ~CXL_VSEC_PROTOCOL_MASK;
- val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
- if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
- dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
- return rc;
- }
- /*
- * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
- * we must wait 100ms after this mode switch before touching
- * PCIe config space.
- */
- msleep(100);
-
- return 0;
-}
-
-static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
-{
- u64 p1n_base, p2n_base, afu_desc;
- const u64 p1n_size = 0x100;
- const u64 p2n_size = 0x1000;
-
- p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
- p2n_base = p2_base(dev) + (afu->slice * p2n_size);
- afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
- afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);
-
- if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
- goto err;
- if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
- goto err1;
- if (afu_desc) {
- if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
- goto err2;
- }
-
- return 0;
-err2:
- iounmap(afu->p2n_mmio);
-err1:
- iounmap(afu->native->p1n_mmio);
-err:
- dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
- return -ENOMEM;
-}
-
-static void pci_unmap_slice_regs(struct cxl_afu *afu)
-{
- if (afu->p2n_mmio) {
- iounmap(afu->p2n_mmio);
- afu->p2n_mmio = NULL;
- }
- if (afu->native->p1n_mmio) {
- iounmap(afu->native->p1n_mmio);
- afu->native->p1n_mmio = NULL;
- }
- if (afu->native->afu_desc_mmio) {
- iounmap(afu->native->afu_desc_mmio);
- afu->native->afu_desc_mmio = NULL;
- }
-}
-
-void cxl_pci_release_afu(struct device *dev)
-{
- struct cxl_afu *afu = to_cxl_afu(dev);
-
- pr_devel("%s\n", __func__);
-
- idr_destroy(&afu->contexts_idr);
- cxl_release_spa(afu);
-
- kfree(afu->native);
- kfree(afu);
-}
-
-/* Expects AFU struct to have recently been zeroed out */
-static int cxl_read_afu_descriptor(struct cxl_afu *afu)
-{
- u64 val;
-
- val = AFUD_READ_INFO(afu);
- afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
- afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
- afu->crs_num = AFUD_NUM_CRS(val);
-
- if (AFUD_AFU_DIRECTED(val))
- afu->modes_supported |= CXL_MODE_DIRECTED;
- if (AFUD_DEDICATED_PROCESS(val))
- afu->modes_supported |= CXL_MODE_DEDICATED;
- if (AFUD_TIME_SLICED(val))
- afu->modes_supported |= CXL_MODE_TIME_SLICED;
-
- val = AFUD_READ_PPPSA(afu);
- afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
- afu->psa = AFUD_PPPSA_PSA(val);
- if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
- afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);
-
- val = AFUD_READ_CR(afu);
- afu->crs_len = AFUD_CR_LEN(val) * 256;
- afu->crs_offset = AFUD_READ_CR_OFF(afu);
-
-
- /* eb_len is in multiple of 4K */
- afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
- afu->eb_offset = AFUD_READ_EB_OFF(afu);
-
- /* eb_off is 4K aligned so lower 12 bits are always zero */
- if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
- dev_warn(&afu->dev,
- "Invalid AFU error buffer offset %Lx\n",
- afu->eb_offset);
- dev_info(&afu->dev,
- "Ignoring AFU error buffer in the descriptor\n");
- /* indicate that no afu buffer exists */
- afu->eb_len = 0;
- }
-
- return 0;
-}
-
-static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
-{
- int i, rc;
- u32 val;
-
- if (afu->psa && afu->adapter->ps_size <
- (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
- dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
- return -ENODEV;
- }
-
- if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
- dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size);
-
- for (i = 0; i < afu->crs_num; i++) {
- rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
- if (rc || val == 0) {
- dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
- return -EINVAL;
- }
- }
-
- if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
- /*
- * We could also check this for the dedicated process model
- * since the architecture indicates it should be set to 1, but
- * in that case we ignore the value and I'd rather not risk
- * breaking any existing dedicated process AFUs that left it as
- * 0 (not that I'm aware of any). It is clearly an error for an
- * AFU directed AFU to set this to 0, and would have previously
- * triggered a bug resulting in the maximum not being enforced
- * at all since idr_alloc treats 0 as no maximum.
- */
- dev_err(&afu->dev, "AFU does not support any processes\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int sanitise_afu_regs_psl9(struct cxl_afu *afu)
-{
- u64 reg;
-
- /*
- * Clear out any regs that contain either an IVTE or address or may be
- * waiting on an acknowledgment to try to be a bit safer as we bring
- * it online
- */
- reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
- dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
- if (cxl_ops->afu_reset(afu))
- return -EIO;
- if (cxl_afu_disable(afu))
- return -EIO;
- if (cxl_psl_purge(afu))
- return -EIO;
- }
- cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
- reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- if (reg) {
- dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
- if (reg & CXL_PSL9_DSISR_An_TF)
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
- else
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
- }
- if (afu->adapter->native->sl_ops->register_serr_irq) {
- reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- if (reg) {
- if (reg & ~0x000000007fffffff)
- dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
- cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
- }
- }
- reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- if (reg) {
- dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
- cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
- }
-
- return 0;
-}
-
-static int sanitise_afu_regs_psl8(struct cxl_afu *afu)
-{
- u64 reg;
-
- /*
- * Clear out any regs that contain either an IVTE or address or may be
- * waiting on an acknowledgement to try to be a bit safer as we bring
- * it online
- */
- reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
- dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
- if (cxl_ops->afu_reset(afu))
- return -EIO;
- if (cxl_afu_disable(afu))
- return -EIO;
- if (cxl_psl_purge(afu))
- return -EIO;
- }
- cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
- cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
- cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
- cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
- cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
- cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
- cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
- reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
- if (reg) {
- dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
- if (reg & CXL_PSL_DSISR_TRANS)
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
- else
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
- }
- if (afu->adapter->native->sl_ops->register_serr_irq) {
- reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- if (reg) {
- if (reg & ~0xffff)
- dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
- cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
- }
- }
- reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- if (reg) {
- dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
- cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
- }
-
- return 0;
-}
-
-#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
-/*
- * afu_eb_read:
- * Called from sysfs and reads the afu error info buffer. The h/w only supports
- * 4/8 byte aligned accesses. So in case the requested offset/count aren't 8 byte
- * aligned, the function uses a bounce buffer which can be at most PAGE_SIZE.
- */
-ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
- loff_t off, size_t count)
-{
- loff_t aligned_start, aligned_end;
- size_t aligned_length;
- void *tbuf;
- const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;
-
- if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
- return 0;
-
- /* calculate aligned read window */
- count = min((size_t)(afu->eb_len - off), count);
- aligned_start = round_down(off, 8);
- aligned_end = round_up(off + count, 8);
- aligned_length = aligned_end - aligned_start;
-
- /* max we can copy in one read is PAGE_SIZE */
- if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
- aligned_length = ERR_BUFF_MAX_COPY_SIZE;
- count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
- }
-
- /* use bounce buffer for copy */
- tbuf = (void *)__get_free_page(GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
-
- /* perform aligned read from the mmio region */
- memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
- memcpy(buf, tbuf + (off & 0x7), count);
-
- free_page((unsigned long)tbuf);
-
- return count;
-}
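As a worked illustration of the alignment handling above (a sketch only, not code from the original driver; round_down()/round_up() are the standard kernel helpers): a request with off = 5 and count = 10 is widened to the aligned window [0, 16), i.e. two 8-byte MMIO reads into the bounce buffer, after which the caller's 10 bytes are copied out starting at bounce-buffer offset off & 0x7 = 5.

/* Hypothetical helper mirroring the window computation used above. */
static void eb_aligned_window(loff_t off, size_t count,
			      loff_t *aligned_start, size_t *aligned_length)
{
	*aligned_start  = round_down(off, 8);                         /* 5  -> 0  */
	*aligned_length = round_up(off + count, 8) - *aligned_start;  /* 15 -> 16 */
}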
-
-static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
-{
- int rc;
-
- if ((rc = pci_map_slice_regs(afu, adapter, dev)))
- return rc;
-
- if (adapter->native->sl_ops->sanitise_afu_regs) {
- rc = adapter->native->sl_ops->sanitise_afu_regs(afu);
- if (rc)
- goto err1;
- }
-
- /* We need to reset the AFU before we can read the AFU descriptor */
- if ((rc = cxl_ops->afu_reset(afu)))
- goto err1;
-
- if (cxl_verbose)
- dump_afu_descriptor(afu);
-
- if ((rc = cxl_read_afu_descriptor(afu)))
- goto err1;
-
- if ((rc = cxl_afu_descriptor_looks_ok(afu)))
- goto err1;
-
- if (adapter->native->sl_ops->afu_regs_init)
- if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
- goto err1;
-
- if (adapter->native->sl_ops->register_serr_irq)
- if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
- goto err1;
-
- if ((rc = cxl_native_register_psl_irq(afu)))
- goto err2;
-
- atomic_set(&afu->configured_state, 0);
- return 0;
-
-err2:
- if (adapter->native->sl_ops->release_serr_irq)
- adapter->native->sl_ops->release_serr_irq(afu);
-err1:
- pci_unmap_slice_regs(afu);
- return rc;
-}
-
-static void pci_deconfigure_afu(struct cxl_afu *afu)
-{
- /*
- * It's okay to deconfigure when AFU is already locked, otherwise wait
- * until there are no readers
- */
- if (atomic_read(&afu->configured_state) != -1) {
- while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1)
- schedule();
- }
- cxl_native_release_psl_irq(afu);
- if (afu->adapter->native->sl_ops->release_serr_irq)
- afu->adapter->native->sl_ops->release_serr_irq(afu);
- pci_unmap_slice_regs(afu);
-}
-
-static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
-{
- struct cxl_afu *afu;
- int rc = -ENOMEM;
-
- afu = cxl_alloc_afu(adapter, slice);
- if (!afu)
- return -ENOMEM;
-
- afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
- if (!afu->native)
- goto err_free_afu;
-
- mutex_init(&afu->native->spa_mutex);
-
- rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
- if (rc)
- goto err_free_native;
-
- rc = pci_configure_afu(afu, adapter, dev);
- if (rc)
- goto err_free_native;
-
- /* Don't care if this fails */
- cxl_debugfs_afu_add(afu);
-
- /*
- * After we call this function we must not free the afu directly, even
- * if it returns an error!
- */
- if ((rc = cxl_register_afu(afu)))
- goto err_put_dev;
-
- if ((rc = cxl_sysfs_afu_add(afu)))
- goto err_del_dev;
-
- adapter->afu[afu->slice] = afu;
-
- if ((rc = cxl_pci_vphb_add(afu)))
- dev_info(&afu->dev, "Can't register vPHB\n");
-
- return 0;
-
-err_del_dev:
- device_del(&afu->dev);
-err_put_dev:
- pci_deconfigure_afu(afu);
- cxl_debugfs_afu_remove(afu);
- put_device(&afu->dev);
- return rc;
-
-err_free_native:
- kfree(afu->native);
-err_free_afu:
- kfree(afu);
- return rc;
-
-}
-
-static void cxl_pci_remove_afu(struct cxl_afu *afu)
-{
- pr_devel("%s\n", __func__);
-
- if (!afu)
- return;
-
- cxl_pci_vphb_remove(afu);
- cxl_sysfs_afu_remove(afu);
- cxl_debugfs_afu_remove(afu);
-
- spin_lock(&afu->adapter->afu_list_lock);
- afu->adapter->afu[afu->slice] = NULL;
- spin_unlock(&afu->adapter->afu_list_lock);
-
- cxl_context_detach_all(afu);
- cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
-
- pci_deconfigure_afu(afu);
- device_unregister(&afu->dev);
-}
-
-int cxl_pci_reset(struct cxl *adapter)
-{
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
- int rc;
-
- if (adapter->perst_same_image) {
- dev_warn(&dev->dev,
- "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
- return -EINVAL;
- }
-
- dev_info(&dev->dev, "CXL reset\n");
-
- /*
- * The adapter is about to be reset, so ignore errors.
- */
- cxl_data_cache_flush(adapter);
-
- /* pcie_warm_reset requests a fundamental pci reset which includes a
- * PERST assert/deassert. PERST triggers a loading of the image
- * if "user" or "factory" is selected in sysfs */
- if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
- dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
- return rc;
- }
-
- return rc;
-}
-
-static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
-{
- if (pci_request_region(dev, 2, "priv 2 regs"))
- goto err1;
- if (pci_request_region(dev, 0, "priv 1 regs"))
- goto err2;
-
- pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
- p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
-
- if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
- goto err3;
-
- if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
- goto err4;
-
- return 0;
-
-err4:
- iounmap(adapter->native->p1_mmio);
- adapter->native->p1_mmio = NULL;
-err3:
- pci_release_region(dev, 0);
-err2:
- pci_release_region(dev, 2);
-err1:
- return -ENOMEM;
-}
-
-static void cxl_unmap_adapter_regs(struct cxl *adapter)
-{
- if (adapter->native->p1_mmio) {
- iounmap(adapter->native->p1_mmio);
- adapter->native->p1_mmio = NULL;
- pci_release_region(to_pci_dev(adapter->dev.parent), 2);
- }
- if (adapter->native->p2_mmio) {
- iounmap(adapter->native->p2_mmio);
- adapter->native->p2_mmio = NULL;
- pci_release_region(to_pci_dev(adapter->dev.parent), 0);
- }
-}
-
-static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
-{
- int vsec;
- u32 afu_desc_off, afu_desc_size;
- u32 ps_off, ps_size;
- u16 vseclen;
- u8 image_state;
-
- if (!(vsec = find_cxl_vsec(dev))) {
- dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
- return -ENODEV;
- }
-
- CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
- if (vseclen < CXL_VSEC_MIN_SIZE) {
- dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
- return -EINVAL;
- }
-
- CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
- CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
- CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
- CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
- CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
- CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
- adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
- adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
- adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
-
- CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
- CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
- CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
- CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
- CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);
-
- /* Convert everything to bytes, because there is NO WAY I'd look at the
- * code a month later and forget what units these are in ;-) */
- adapter->native->ps_off = ps_off * 64 * 1024;
- adapter->ps_size = ps_size * 64 * 1024;
- adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
- adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;
-
- /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
- adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;
-
- return 0;
-}
-
-/*
- * Workaround a PCIe Host Bridge defect on some cards that can cause
- * malformed Transaction Layer Packet (TLP) errors to be erroneously
- * reported. Mask this error in the Uncorrectable Error Mask Register.
- *
- * The upper nibble of the PSL revision is used to distinguish between
- * different cards. The affected ones have it set to 0.
- */
-static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
-{
- int aer;
- u32 data;
-
- if (adapter->psl_rev & 0xf000)
- return;
- if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
- return;
- pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
- if (data & PCI_ERR_UNC_MALF_TLP)
- if (data & PCI_ERR_UNC_INTN)
- return;
- data |= PCI_ERR_UNC_MALF_TLP;
- data |= PCI_ERR_UNC_INTN;
- pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
-}
-
-static bool cxl_compatible_caia_version(struct cxl *adapter)
-{
- if (cxl_is_power8() && (adapter->caia_major == 1))
- return true;
-
- if (cxl_is_power9() && (adapter->caia_major == 2))
- return true;
-
- return false;
-}
-
-static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
-{
- if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
- return -EBUSY;
-
- if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
- dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
- return -EINVAL;
- }
-
- if (!cxl_compatible_caia_version(adapter)) {
- dev_info(&dev->dev, "Ignoring card. PSL type is not supported (caia version: %d)\n",
- adapter->caia_major);
- return -ENODEV;
- }
-
- if (!adapter->slices) {
- /* Once we support dynamic reprogramming we can use the card if
- * it supports loadable AFUs */
- dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
- return -EINVAL;
- }
-
- if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
- dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
- return -EINVAL;
- }
-
- if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
- dev_err(&dev->dev, "ABORTING: Problem state size larger than "
- "available in BAR2: 0x%llx > 0x%llx\n",
- adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
- return -EINVAL;
- }
-
- return 0;
-}
-
-ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
-{
- return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
-}
-
-static void cxl_release_adapter(struct device *dev)
-{
- struct cxl *adapter = to_cxl_adapter(dev);
-
- pr_devel("cxl_release_adapter\n");
-
- cxl_remove_adapter_nr(adapter);
-
- kfree(adapter->native);
- kfree(adapter);
-}
-
-#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
-
-static int sanitise_adapter_regs(struct cxl *adapter)
-{
- int rc = 0;
-
- /* Clear PSL tberror bit by writing 1 to it */
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
-
- if (adapter->native->sl_ops->invalidate_all) {
- /* do not invalidate ERAT entries when not reloading on PERST */
- if (cxl_is_power9() && (adapter->perst_loads_image))
- return 0;
- rc = adapter->native->sl_ops->invalidate_all(adapter);
- }
-
- return rc;
-}
-
-/* This should contain *only* operations that can safely be done in
- * both creation and recovery.
- */
-static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
-{
- int rc;
-
- adapter->dev.parent = &dev->dev;
- adapter->dev.release = cxl_release_adapter;
- pci_set_drvdata(dev, adapter);
-
- rc = pci_enable_device(dev);
- if (rc) {
- dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
- return rc;
- }
-
- if ((rc = cxl_read_vsec(adapter, dev)))
- return rc;
-
- if ((rc = cxl_vsec_looks_ok(adapter, dev)))
- return rc;
-
- cxl_fixup_malformed_tlp(adapter, dev);
-
- if ((rc = setup_cxl_bars(dev)))
- return rc;
-
- if ((rc = switch_card_to_cxl(dev)))
- return rc;
-
- if ((rc = cxl_update_image_control(adapter)))
- return rc;
-
- if ((rc = cxl_map_adapter_regs(adapter, dev)))
- return rc;
-
- if ((rc = sanitise_adapter_regs(adapter)))
- goto err;
-
- if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
- goto err;
-
- /* Required for devices using CAPP DMA mode, harmless for others */
- pci_set_master(dev);
-
- adapter->tunneled_ops_supported = false;
-
- if (cxl_is_power9()) {
- if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1))
- dev_info(&dev->dev, "Tunneled operations unsupported\n");
- else
- adapter->tunneled_ops_supported = true;
- }
-
- if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
- goto err;
-
- /* If recovery happened, the last step is to turn on snooping.
- * In the non-recovery case this has no effect */
- if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
- goto err;
-
-	/* Ignore error, adapter init is not dependent on timebase sync */
- cxl_setup_psl_timebase(adapter, dev);
-
- if ((rc = cxl_native_register_psl_err_irq(adapter)))
- goto err;
-
- return 0;
-
-err:
- cxl_unmap_adapter_regs(adapter);
- return rc;
-
-}
-
-static void cxl_deconfigure_adapter(struct cxl *adapter)
-{
- struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
-
- if (cxl_is_power9())
- pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0);
-
- cxl_native_release_psl_err_irq(adapter);
- cxl_unmap_adapter_regs(adapter);
-
- pci_disable_device(pdev);
-}
-
-static void cxl_stop_trace_psl9(struct cxl *adapter)
-{
- int traceid;
- u64 trace_state, trace_mask;
- struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
-
-	/* read each trace array state and issue an mmio to stop it if needed */
- for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) {
- trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG);
- trace_mask = (0x3ULL << (62 - traceid * 2));
- trace_state = (trace_state & trace_mask) >> (62 - traceid * 2);
- dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n",
- traceid, trace_state);
-
- /* issue mmio if the trace array isn't in FIN state */
- if (trace_state != CXL_PSL9_TRACESTATE_FIN)
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG,
- 0x8400000000000000ULL | traceid);
- }
-}
-
-static void cxl_stop_trace_psl8(struct cxl *adapter)
-{
- int slice;
-
- /* Stop the trace */
- cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);
-
- /* Stop the slice traces */
- spin_lock(&adapter->afu_list_lock);
- for (slice = 0; slice < adapter->slices; slice++) {
- if (adapter->afu[slice])
- cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE,
- 0x8000000000000000LL);
- }
- spin_unlock(&adapter->afu_list_lock);
-}
-
-static const struct cxl_service_layer_ops psl9_ops = {
- .adapter_regs_init = init_implementation_adapter_regs_psl9,
- .invalidate_all = cxl_invalidate_all_psl9,
- .afu_regs_init = init_implementation_afu_regs_psl9,
- .sanitise_afu_regs = sanitise_afu_regs_psl9,
- .register_serr_irq = cxl_native_register_serr_irq,
- .release_serr_irq = cxl_native_release_serr_irq,
- .handle_interrupt = cxl_irq_psl9,
- .fail_irq = cxl_fail_irq_psl,
- .activate_dedicated_process = cxl_activate_dedicated_process_psl9,
- .attach_afu_directed = cxl_attach_afu_directed_psl9,
- .attach_dedicated_process = cxl_attach_dedicated_process_psl9,
- .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9,
- .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
- .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
- .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
- .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9,
- .debugfs_stop_trace = cxl_stop_trace_psl9,
- .timebase_read = timebase_read_psl9,
- .capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
- .needs_reset_before_disable = true,
-};
-
-static const struct cxl_service_layer_ops psl8_ops = {
- .adapter_regs_init = init_implementation_adapter_regs_psl8,
- .invalidate_all = cxl_invalidate_all_psl8,
- .afu_regs_init = init_implementation_afu_regs_psl8,
- .sanitise_afu_regs = sanitise_afu_regs_psl8,
- .register_serr_irq = cxl_native_register_serr_irq,
- .release_serr_irq = cxl_native_release_serr_irq,
- .handle_interrupt = cxl_irq_psl8,
- .fail_irq = cxl_fail_irq_psl,
- .activate_dedicated_process = cxl_activate_dedicated_process_psl8,
- .attach_afu_directed = cxl_attach_afu_directed_psl8,
- .attach_dedicated_process = cxl_attach_dedicated_process_psl8,
- .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
- .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8,
- .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8,
- .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8,
- .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8,
- .debugfs_stop_trace = cxl_stop_trace_psl8,
- .write_timebase_ctrl = write_timebase_ctrl_psl8,
- .timebase_read = timebase_read_psl8,
- .capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
- .needs_reset_before_disable = true,
-};
-
-static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
-{
- if (cxl_is_power8()) {
- dev_info(&dev->dev, "Device uses a PSL8\n");
- adapter->native->sl_ops = &psl8_ops;
- } else {
- dev_info(&dev->dev, "Device uses a PSL9\n");
- adapter->native->sl_ops = &psl9_ops;
- }
-}
-
-
-static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
-{
- struct cxl *adapter;
- int rc;
-
- adapter = cxl_alloc_adapter();
- if (!adapter)
- return ERR_PTR(-ENOMEM);
-
- adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
- if (!adapter->native) {
- rc = -ENOMEM;
- goto err_release;
- }
-
- set_sl_ops(adapter, dev);
-
- /* Set defaults for parameters which need to persist over
- * configure/reconfigure
- */
- adapter->perst_loads_image = true;
- adapter->perst_same_image = false;
-
- rc = cxl_configure_adapter(adapter, dev);
- if (rc) {
- pci_disable_device(dev);
- goto err_release;
- }
-
- /* Don't care if this one fails: */
- cxl_debugfs_adapter_add(adapter);
-
- /*
- * After we call this function we must not free the adapter directly,
- * even if it returns an error!
- */
- if ((rc = cxl_register_adapter(adapter)))
- goto err_put_dev;
-
- if ((rc = cxl_sysfs_adapter_add(adapter)))
- goto err_del_dev;
-
- /* Release the context lock as adapter is configured */
- cxl_adapter_context_unlock(adapter);
-
- return adapter;
-
-err_del_dev:
- device_del(&adapter->dev);
-err_put_dev:
- /* This should mirror cxl_remove_adapter, except without the
- * sysfs parts
- */
- cxl_debugfs_adapter_remove(adapter);
- cxl_deconfigure_adapter(adapter);
- put_device(&adapter->dev);
- return ERR_PTR(rc);
-
-err_release:
- cxl_release_adapter(&adapter->dev);
- return ERR_PTR(rc);
-}
-
-static void cxl_pci_remove_adapter(struct cxl *adapter)
-{
- pr_devel("cxl_remove_adapter\n");
-
- cxl_sysfs_adapter_remove(adapter);
- cxl_debugfs_adapter_remove(adapter);
-
- /*
-	 * Flush adapter datacache as it's about to be removed.
- */
- cxl_data_cache_flush(adapter);
-
- cxl_deconfigure_adapter(adapter);
-
- device_unregister(&adapter->dev);
-}
-
-#define CXL_MAX_PCIEX_PARENT 2
-
-int cxl_slot_is_switched(struct pci_dev *dev)
-{
- struct device_node *np;
- int depth = 0;
-
- if (!(np = pci_device_to_OF_node(dev))) {
- pr_err("cxl: np = NULL\n");
- return -ENODEV;
- }
- of_node_get(np);
- while (np) {
- np = of_get_next_parent(np);
- if (!of_node_is_type(np, "pciex"))
- break;
- depth++;
- }
- of_node_put(np);
- return (depth > CXL_MAX_PCIEX_PARENT);
-}
-
-static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- struct cxl *adapter;
- int slice;
- int rc;
-
- dev_err_once(&dev->dev, "DEPRECATED: cxl is deprecated and will be removed in a future kernel release\n");
-
- if (cxl_pci_is_vphb_device(dev)) {
- dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
- return -ENODEV;
- }
-
- if (cxl_slot_is_switched(dev)) {
- dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
- return -ENODEV;
- }
-
- if (cxl_is_power9() && !radix_enabled()) {
- dev_info(&dev->dev, "Only Radix mode supported\n");
- return -ENODEV;
- }
-
- if (cxl_verbose)
- dump_cxl_config_space(dev);
-
- adapter = cxl_pci_init_adapter(dev);
- if (IS_ERR(adapter)) {
- dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
- return PTR_ERR(adapter);
- }
-
- for (slice = 0; slice < adapter->slices; slice++) {
- if ((rc = pci_init_afu(adapter, slice, dev))) {
- dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
- continue;
- }
-
- rc = cxl_afu_select_best_mode(adapter->afu[slice]);
- if (rc)
- dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
- }
-
- return 0;
-}
-
-static void cxl_remove(struct pci_dev *dev)
-{
- struct cxl *adapter = pci_get_drvdata(dev);
- struct cxl_afu *afu;
- int i;
-
- /*
-	 * Lock to prevent anyone from grabbing a ref through the adapter list
-	 * as we are removing it
- */
- for (i = 0; i < adapter->slices; i++) {
- afu = adapter->afu[i];
- cxl_pci_remove_afu(afu);
- }
- cxl_pci_remove_adapter(adapter);
-}
-
-static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
- pci_channel_state_t state)
-{
- struct pci_dev *afu_dev;
- struct pci_driver *afu_drv;
- const struct pci_error_handlers *err_handler;
- pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
- pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
-
- /* There should only be one entry, but go through the list
- * anyway
- */
- if (afu == NULL || afu->phb == NULL)
- return result;
-
- list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- afu_drv = to_pci_driver(afu_dev->dev.driver);
- if (!afu_drv)
- continue;
-
- afu_dev->error_state = state;
-
- err_handler = afu_drv->err_handler;
- if (err_handler)
- afu_result = err_handler->error_detected(afu_dev,
- state);
- /* Disconnect trumps all, NONE trumps NEED_RESET */
- if (afu_result == PCI_ERS_RESULT_DISCONNECT)
- result = PCI_ERS_RESULT_DISCONNECT;
- else if ((afu_result == PCI_ERS_RESULT_NONE) &&
- (result == PCI_ERS_RESULT_NEED_RESET))
- result = PCI_ERS_RESULT_NONE;
- }
- return result;
-}
-
-static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct cxl *adapter = pci_get_drvdata(pdev);
- struct cxl_afu *afu;
- pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
- pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
- int i;
-
-	/* At this point, we could still have interrupts pending.
- * Let's try to get them out of the way before they do
- * anything we don't like.
- */
- schedule();
-
- /* If we're permanently dead, give up. */
- if (state == pci_channel_io_perm_failure) {
- spin_lock(&adapter->afu_list_lock);
- for (i = 0; i < adapter->slices; i++) {
- afu = adapter->afu[i];
- /*
- * Tell the AFU drivers; but we don't care what they
- * say, we're going away.
- */
- cxl_vphb_error_detected(afu, state);
- }
- spin_unlock(&adapter->afu_list_lock);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- /* Are we reflashing?
- *
- * If we reflash, we could come back as something entirely
- * different, including a non-CAPI card. As such, by default
- * we don't participate in the process. We'll be unbound and
- * the slot re-probed. (TODO: check EEH doesn't blindly rebind
- * us!)
- *
-	 * However, this isn't the entire story: for reliability
- * reasons, we usually want to reflash the FPGA on PERST in
- * order to get back to a more reliable known-good state.
- *
- * This causes us a bit of a problem: if we reflash we can't
- * trust that we'll come back the same - we could have a new
- * image and been PERSTed in order to load that
- * image. However, most of the time we actually *will* come
- * back the same - for example a regular EEH event.
- *
- * Therefore, we allow the user to assert that the image is
- * indeed the same and that we should continue on into EEH
- * anyway.
- */
- if (adapter->perst_loads_image && !adapter->perst_same_image) {
- /* TODO take the PHB out of CXL mode */
- dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
- return PCI_ERS_RESULT_NONE;
- }
-
- /*
- * At this point, we want to try to recover. We'll always
- * need a complete slot reset: we don't trust any other reset.
- *
- * Now, we go through each AFU:
- * - We send the driver, if bound, an error_detected callback.
- * We expect it to clean up, but it can also tell us to give
- * up and permanently detach the card. To simplify things, if
- * any bound AFU driver doesn't support EEH, we give up on EEH.
- *
- * - We detach all contexts associated with the AFU. This
- * does not free them, but puts them into a CLOSED state
-	 *     which causes any associated files to return useful
- * errors to userland. It also unmaps, but does not free,
- * any IRQs.
- *
- * - We clean up our side: releasing and unmapping resources we hold
- * so we can wire them up again when the hardware comes back up.
- *
- * Driver authors should note:
- *
- * - Any contexts you create in your kernel driver (except
- * those associated with anonymous file descriptors) are
- * your responsibility to free and recreate. Likewise with
- * any attached resources.
- *
- * - We will take responsibility for re-initialising the
- * device context (the one set up for you in
- * cxl_pci_enable_device_hook and accessed through
- * cxl_get_context). If you've attached IRQs or other
-	 *     resources to it, they remain yours to free.
- *
- * You can call the same functions to release resources as you
- * normally would: we make sure that these functions continue
- * to work when the hardware is down.
- *
- * Two examples:
- *
- * 1) If you normally free all your resources at the end of
- * each request, or if you use anonymous FDs, your
- * error_detected callback can simply set a flag to tell
- * your driver not to start any new calls. You can then
- * clear the flag in the resume callback.
- *
- * 2) If you normally allocate your resources on startup:
- * * Set a flag in error_detected as above.
- * * Let CXL detach your contexts.
- * * In slot_reset, free the old resources and allocate new ones.
- * * In resume, clear the flag to allow things to start.
- */
-
- /* Make sure no one else changes the afu list */
- spin_lock(&adapter->afu_list_lock);
-
- for (i = 0; i < adapter->slices; i++) {
- afu = adapter->afu[i];
-
- if (afu == NULL)
- continue;
-
- afu_result = cxl_vphb_error_detected(afu, state);
- cxl_context_detach_all(afu);
- cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
- pci_deconfigure_afu(afu);
-
- /* Disconnect trumps all, NONE trumps NEED_RESET */
- if (afu_result == PCI_ERS_RESULT_DISCONNECT)
- result = PCI_ERS_RESULT_DISCONNECT;
- else if ((afu_result == PCI_ERS_RESULT_NONE) &&
- (result == PCI_ERS_RESULT_NEED_RESET))
- result = PCI_ERS_RESULT_NONE;
- }
- spin_unlock(&adapter->afu_list_lock);
-
- /* should take the context lock here */
- if (cxl_adapter_context_lock(adapter) != 0)
- dev_warn(&adapter->dev,
- "Couldn't take context lock with %d active-contexts\n",
- atomic_read(&adapter->contexts_num));
-
- cxl_deconfigure_adapter(adapter);
-
- return result;
-}
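To make the driver-author guidance in the comment above concrete, here is a minimal sketch of option 1). It is hypothetical code, not part of cxl or of any in-tree AFU driver; struct my_afu and its dev_frozen flag are invented for illustration.

static pci_ers_result_t my_afu_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct my_afu *p = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Flag set: stop starting new work; cxl detaches our contexts. */
	p->dev_frozen = true;
	return PCI_ERS_RESULT_NEED_RESET;
}

static void my_afu_resume(struct pci_dev *pdev)
{
	struct my_afu *p = pci_get_drvdata(pdev);

	/* Hardware and device context are back; new work may start. */
	p->dev_frozen = false;
}

static const struct pci_error_handlers my_afu_err_handler = {
	.error_detected	= my_afu_error_detected,
	.resume		= my_afu_resume,
};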
-
-static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
-{
- struct cxl *adapter = pci_get_drvdata(pdev);
- struct cxl_afu *afu;
- struct cxl_context *ctx;
- struct pci_dev *afu_dev;
- struct pci_driver *afu_drv;
- const struct pci_error_handlers *err_handler;
- pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
- pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
- int i;
-
- if (cxl_configure_adapter(adapter, pdev))
- goto err;
-
- /*
-	 * Unlock context activation for the adapter. Ideally this should be
-	 * done in cxl_pci_resume, but the cxlflash module tries to activate
-	 * the master context as part of the slot_reset callback.
- */
- cxl_adapter_context_unlock(adapter);
-
- spin_lock(&adapter->afu_list_lock);
- for (i = 0; i < adapter->slices; i++) {
- afu = adapter->afu[i];
-
- if (afu == NULL)
- continue;
-
- if (pci_configure_afu(afu, adapter, pdev))
- goto err_unlock;
-
- if (cxl_afu_select_best_mode(afu))
- goto err_unlock;
-
- if (afu->phb == NULL)
- continue;
-
- list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- /* Reset the device context.
- * TODO: make this less disruptive
- */
- ctx = cxl_get_context(afu_dev);
-
- if (ctx && cxl_release_context(ctx))
- goto err_unlock;
-
- ctx = cxl_dev_context_init(afu_dev);
- if (IS_ERR(ctx))
- goto err_unlock;
-
- afu_dev->dev.archdata.cxl_ctx = ctx;
-
- if (cxl_ops->afu_check_and_enable(afu))
- goto err_unlock;
-
- afu_dev->error_state = pci_channel_io_normal;
-
- /* If there's a driver attached, allow it to
- * chime in on recovery. Drivers should check
- * if everything has come back OK, but
- * shouldn't start new work until we call
- * their resume function.
- */
- afu_drv = to_pci_driver(afu_dev->dev.driver);
- if (!afu_drv)
- continue;
-
- err_handler = afu_drv->err_handler;
- if (err_handler && err_handler->slot_reset)
- afu_result = err_handler->slot_reset(afu_dev);
-
- if (afu_result == PCI_ERS_RESULT_DISCONNECT)
- result = PCI_ERS_RESULT_DISCONNECT;
- }
- }
-
- spin_unlock(&adapter->afu_list_lock);
- return result;
-
-err_unlock:
- spin_unlock(&adapter->afu_list_lock);
-
-err:
- /* All the bits that happen in both error_detected and cxl_remove
- * should be idempotent, so we don't need to worry about leaving a mix
- * of unconfigured and reconfigured resources.
- */
- dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
- return PCI_ERS_RESULT_DISCONNECT;
-}
-
-static void cxl_pci_resume(struct pci_dev *pdev)
-{
- struct cxl *adapter = pci_get_drvdata(pdev);
- struct cxl_afu *afu;
- struct pci_dev *afu_dev;
- struct pci_driver *afu_drv;
- const struct pci_error_handlers *err_handler;
- int i;
-
-	/* Everything is back now. Drivers should restart work.
- * This is not the place to be checking if everything came back up
- * properly, because there's no return value: do that in slot_reset.
- */
- spin_lock(&adapter->afu_list_lock);
- for (i = 0; i < adapter->slices; i++) {
- afu = adapter->afu[i];
-
- if (afu == NULL || afu->phb == NULL)
- continue;
-
- list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- afu_drv = to_pci_driver(afu_dev->dev.driver);
- if (!afu_drv)
- continue;
-
- err_handler = afu_drv->err_handler;
- if (err_handler && err_handler->resume)
- err_handler->resume(afu_dev);
- }
- }
- spin_unlock(&adapter->afu_list_lock);
-}
-
-static const struct pci_error_handlers cxl_err_handler = {
- .error_detected = cxl_pci_error_detected,
- .slot_reset = cxl_pci_slot_reset,
- .resume = cxl_pci_resume,
-};
-
-struct pci_driver cxl_pci_driver = {
- .name = "cxl-pci",
- .id_table = cxl_pci_tbl,
- .probe = cxl_probe,
- .remove = cxl_remove,
- .shutdown = cxl_remove,
- .err_handler = &cxl_err_handler,
-};
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
deleted file mode 100644
index b1fc6446bd4b..000000000000
--- a/drivers/misc/cxl/sysfs.c
+++ /dev/null
@@ -1,771 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/sysfs.h>
-#include <linux/pci_regs.h>
-
-#include "cxl.h"
-
-#define to_afu_chardev_m(d) dev_get_drvdata(d)
-
-/********* Adapter attributes **********************************************/
-
-static ssize_t caia_version_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
- adapter->caia_minor);
-}
-
-static ssize_t psl_revision_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
-}
-
-static ssize_t base_image_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
-}
-
-static ssize_t image_loaded_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- if (adapter->user_image_loaded)
- return scnprintf(buf, PAGE_SIZE, "user\n");
- return scnprintf(buf, PAGE_SIZE, "factory\n");
-}
-
-static ssize_t psl_timebase_synced_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
- u64 psl_tb, delta;
-
- /* Recompute the status only in native mode */
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- psl_tb = adapter->native->sl_ops->timebase_read(adapter);
- delta = abs(mftb() - psl_tb);
-
- /* CORE TB and PSL TB difference <= 16usecs ? */
- adapter->psl_timebase_synced = (tb_to_ns(delta) < 16000) ? true : false;
- pr_devel("PSL timebase %s - delta: 0x%016llx\n",
- (tb_to_ns(delta) < 16000) ? "synchronized" :
- "not synchronized", tb_to_ns(delta));
- }
- return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
-}
-
-static ssize_t tunneled_ops_supported_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
-}
-
-static ssize_t reset_adapter_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl *adapter = to_cxl_adapter(device);
- int rc;
- int val;
-
- rc = sscanf(buf, "%i", &val);
- if ((rc != 1) || (val != 1 && val != -1))
- return -EINVAL;
-
- /*
-	 * See if we can take the adapter context lock, which is only allowed
-	 * when there are no contexts attached to the adapter. Once taken, it
-	 * will also prevent any context from getting activated.
- */
- if (val == 1) {
- rc = cxl_adapter_context_lock(adapter);
- if (rc)
- goto out;
-
- rc = cxl_ops->adapter_reset(adapter);
- /* In case reset failed release context lock */
- if (rc)
- cxl_adapter_context_unlock(adapter);
-
- } else if (val == -1) {
- /* Perform a forced adapter reset */
- rc = cxl_ops->adapter_reset(adapter);
- }
-
-out:
- return rc ? rc : count;
-}
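For context (not part of the original source): per the long-standing cxl sysfs ABI this attribute would typically be driven from userspace with something like "echo 1 > /sys/class/cxl/card0/reset", which only succeeds while no contexts are attached, whereas writing -1 forces the reset regardless of attached contexts.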
-
-static ssize_t load_image_on_perst_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- if (!adapter->perst_loads_image)
- return scnprintf(buf, PAGE_SIZE, "none\n");
-
- if (adapter->perst_select_user)
- return scnprintf(buf, PAGE_SIZE, "user\n");
- return scnprintf(buf, PAGE_SIZE, "factory\n");
-}
-
-static ssize_t load_image_on_perst_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl *adapter = to_cxl_adapter(device);
- int rc;
-
- if (!strncmp(buf, "none", 4))
- adapter->perst_loads_image = false;
- else if (!strncmp(buf, "user", 4)) {
- adapter->perst_select_user = true;
- adapter->perst_loads_image = true;
- } else if (!strncmp(buf, "factory", 7)) {
- adapter->perst_select_user = false;
- adapter->perst_loads_image = true;
- } else
- return -EINVAL;
-
- if ((rc = cxl_update_image_control(adapter)))
- return rc;
-
- return count;
-}
-
-static ssize_t perst_reloads_same_image_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl *adapter = to_cxl_adapter(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
-}
-
-static ssize_t perst_reloads_same_image_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl *adapter = to_cxl_adapter(device);
- int rc;
- int val;
-
- rc = sscanf(buf, "%i", &val);
- if ((rc != 1) || !(val == 1 || val == 0))
- return -EINVAL;
-
- adapter->perst_same_image = (val == 1);
- return count;
-}
-
-static struct device_attribute adapter_attrs[] = {
- __ATTR_RO(caia_version),
- __ATTR_RO(psl_revision),
- __ATTR_RO(base_image),
- __ATTR_RO(image_loaded),
- __ATTR_RO(psl_timebase_synced),
- __ATTR_RO(tunneled_ops_supported),
- __ATTR_RW(load_image_on_perst),
- __ATTR_RW(perst_reloads_same_image),
- __ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
-};
-
-
-/********* AFU master specific attributes **********************************/
-
-static ssize_t mmio_size_show_master(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_afu_chardev_m(device);
-
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
-}
-
-static ssize_t pp_mmio_off_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_afu_chardev_m(device);
-
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
-}
-
-static ssize_t pp_mmio_len_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_afu_chardev_m(device);
-
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
-}
-
-static struct device_attribute afu_master_attrs[] = {
- __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
- __ATTR_RO(pp_mmio_off),
- __ATTR_RO(pp_mmio_len),
-};
-
-
-/********* AFU attributes **************************************************/
-
-static ssize_t mmio_size_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
-
- if (afu->pp_size)
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
-}
-
-static ssize_t reset_store_afu(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
- int rc;
-
- /* Not safe to reset if it is currently in use */
- mutex_lock(&afu->contexts_lock);
- if (!idr_is_empty(&afu->contexts_idr)) {
- rc = -EBUSY;
- goto err;
- }
-
- if ((rc = cxl_ops->afu_reset(afu)))
- goto err;
-
- rc = count;
-err:
- mutex_unlock(&afu->contexts_lock);
- return rc;
-}
-
-static ssize_t irqs_min_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
-}
-
-static ssize_t irqs_max_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
-
- return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
-}
-
-static ssize_t irqs_max_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
- ssize_t ret;
- int irqs_max;
-
- ret = sscanf(buf, "%i", &irqs_max);
- if (ret != 1)
- return -EINVAL;
-
- if (irqs_max < afu->pp_irqs)
- return -EINVAL;
-
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- if (irqs_max > afu->adapter->user_irqs)
- return -EINVAL;
- } else {
- /* pHyp sets a per-AFU limit */
- if (irqs_max > afu->guest->max_ints)
- return -EINVAL;
- }
-
- afu->irqs_max = irqs_max;
- return count;
-}
-
-static ssize_t modes_supported_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
- char *p = buf, *end = buf + PAGE_SIZE;
-
- if (afu->modes_supported & CXL_MODE_DEDICATED)
- p += scnprintf(p, end - p, "dedicated_process\n");
- if (afu->modes_supported & CXL_MODE_DIRECTED)
- p += scnprintf(p, end - p, "afu_directed\n");
- return (p - buf);
-}
-
-static ssize_t prefault_mode_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
-
- switch (afu->prefault_mode) {
- case CXL_PREFAULT_WED:
- return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
- case CXL_PREFAULT_ALL:
- return scnprintf(buf, PAGE_SIZE, "all\n");
- default:
- return scnprintf(buf, PAGE_SIZE, "none\n");
- }
-}
-
-static ssize_t prefault_mode_store(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
- enum prefault_modes mode = -1;
-
- if (!strncmp(buf, "none", 4))
- mode = CXL_PREFAULT_NONE;
- else {
- if (!radix_enabled()) {
-
- /* only allowed when not in radix mode */
- if (!strncmp(buf, "work_element_descriptor", 23))
- mode = CXL_PREFAULT_WED;
- if (!strncmp(buf, "all", 3))
- mode = CXL_PREFAULT_ALL;
- } else {
- dev_err(device, "Cannot prefault with radix enabled\n");
- }
- }
-
- if (mode == -1)
- return -EINVAL;
-
- afu->prefault_mode = mode;
- return count;
-}
-
-static ssize_t mode_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
-
- if (afu->current_mode == CXL_MODE_DEDICATED)
- return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
- if (afu->current_mode == CXL_MODE_DIRECTED)
- return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
- return scnprintf(buf, PAGE_SIZE, "none\n");
-}
-
-static ssize_t mode_store(struct device *device, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxl_afu *afu = to_cxl_afu(device);
- int old_mode, mode = -1;
- int rc = -EBUSY;
-
- /* can't change this if we have a user */
- mutex_lock(&afu->contexts_lock);
- if (!idr_is_empty(&afu->contexts_idr))
- goto err;
-
- if (!strncmp(buf, "dedicated_process", 17))
- mode = CXL_MODE_DEDICATED;
- if (!strncmp(buf, "afu_directed", 12))
- mode = CXL_MODE_DIRECTED;
- if (!strncmp(buf, "none", 4))
- mode = 0;
-
- if (mode == -1) {
- rc = -EINVAL;
- goto err;
- }
-
- /*
- * afu_deactivate_mode needs to be done outside the lock, prevent
- * other contexts coming in before we are ready:
- */
- old_mode = afu->current_mode;
- afu->current_mode = 0;
- afu->num_procs = 0;
-
- mutex_unlock(&afu->contexts_lock);
-
- if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
- return rc;
- if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
- return rc;
-
- return count;
-err:
- mutex_unlock(&afu->contexts_lock);
- return rc;
-}
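For context (not part of the original source): the accepted strings mirror modes_supported, so under the usual /sys/class/cxl/afu<N>.<M> naming a mode change would look something like "echo afu_directed > /sys/class/cxl/afu0.0/mode", and it is only accepted while the AFU has no attached contexts.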
-
-static ssize_t api_version_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
-}
-
-static ssize_t api_version_compatible_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
-}
-
-static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
-{
- struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
-
- return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
-}
-
-static struct device_attribute afu_attrs[] = {
- __ATTR_RO(mmio_size),
- __ATTR_RO(irqs_min),
- __ATTR_RW(irqs_max),
- __ATTR_RO(modes_supported),
- __ATTR_RW(mode),
- __ATTR_RW(prefault_mode),
- __ATTR_RO(api_version),
- __ATTR_RO(api_version_compatible),
- __ATTR(reset, S_IWUSR, NULL, reset_store_afu),
-};
-
-int cxl_sysfs_adapter_add(struct cxl *adapter)
-{
- struct device_attribute *dev_attr;
- int i, rc;
-
- for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
- dev_attr = &adapter_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_ADAPTER_ATTRS)) {
- if ((rc = device_create_file(&adapter->dev, dev_attr)))
- goto err;
- }
- }
- return 0;
-err:
- for (i--; i >= 0; i--) {
- dev_attr = &adapter_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_ADAPTER_ATTRS))
- device_remove_file(&adapter->dev, dev_attr);
- }
- return rc;
-}
-
-void cxl_sysfs_adapter_remove(struct cxl *adapter)
-{
- struct device_attribute *dev_attr;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
- dev_attr = &adapter_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_ADAPTER_ATTRS))
- device_remove_file(&adapter->dev, dev_attr);
- }
-}
-
-struct afu_config_record {
- struct kobject kobj;
- struct bin_attribute config_attr;
- struct list_head list;
- int cr;
- u16 device;
- u16 vendor;
- u32 class;
-};
-
-#define to_cr(obj) container_of(obj, struct afu_config_record, kobj)
-
-static ssize_t vendor_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct afu_config_record *cr = to_cr(kobj);
-
- return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->vendor);
-}
-
-static ssize_t device_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct afu_config_record *cr = to_cr(kobj);
-
- return scnprintf(buf, PAGE_SIZE, "0x%.4x\n", cr->device);
-}
-
-static ssize_t class_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- struct afu_config_record *cr = to_cr(kobj);
-
- return scnprintf(buf, PAGE_SIZE, "0x%.6x\n", cr->class);
-}
-
-static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
-{
- struct afu_config_record *cr = to_cr(kobj);
- struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
-
- u64 i, j, val, rc;
-
- for (i = 0; i < count;) {
- rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
- if (rc)
- val = ~0ULL;
- for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
- buf[i] = (val >> (j * 8)) & 0xff;
- }
-
- return count;
-}
-
-static struct kobj_attribute vendor_attribute =
- __ATTR_RO(vendor);
-static struct kobj_attribute device_attribute =
- __ATTR_RO(device);
-static struct kobj_attribute class_attribute =
- __ATTR_RO(class);
-
-static struct attribute *afu_cr_attrs[] = {
- &vendor_attribute.attr,
- &device_attribute.attr,
- &class_attribute.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(afu_cr);
-
-static void release_afu_config_record(struct kobject *kobj)
-{
- struct afu_config_record *cr = to_cr(kobj);
-
- kfree(cr);
-}
-
-static const struct kobj_type afu_config_record_type = {
- .sysfs_ops = &kobj_sysfs_ops,
- .release = release_afu_config_record,
- .default_groups = afu_cr_groups,
-};
-
-static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
-{
- struct afu_config_record *cr;
- int rc;
-
- cr = kzalloc(sizeof(struct afu_config_record), GFP_KERNEL);
- if (!cr)
- return ERR_PTR(-ENOMEM);
-
- cr->cr = cr_idx;
-
- rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
- if (rc)
- goto err;
- rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
- if (rc)
- goto err;
- rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
- if (rc)
- goto err;
- cr->class >>= 8;
-
- /*
-	 * Export the raw AFU PCIe-like config record. For now this is read only by
- * root - we can expand that later to be readable by non-root and maybe
- * even writable provided we have a good use-case. Once we support
- * exposing AFUs through a virtual PHB they will get that for free from
- * Linux' PCI infrastructure, but until then it's not clear that we
- * need it for anything since the main use case is just identifying
- * AFUs, which can be done via the vendor, device and class attributes.
- */
- sysfs_bin_attr_init(&cr->config_attr);
- cr->config_attr.attr.name = "config";
- cr->config_attr.attr.mode = S_IRUSR;
- cr->config_attr.size = afu->crs_len;
- cr->config_attr.read_new = afu_read_config;
-
- rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type,
- &afu->dev.kobj, "cr%i", cr->cr);
- if (rc)
- goto err1;
-
- rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr);
- if (rc)
- goto err1;
-
- rc = kobject_uevent(&cr->kobj, KOBJ_ADD);
- if (rc)
- goto err2;
-
- return cr;
-err2:
- sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
-err1:
- kobject_put(&cr->kobj);
- return ERR_PTR(rc);
-err:
- kfree(cr);
- return ERR_PTR(rc);
-}
-
-void cxl_sysfs_afu_remove(struct cxl_afu *afu)
-{
- struct device_attribute *dev_attr;
- struct afu_config_record *cr, *tmp;
- int i;
-
- /* remove the err buffer bin attribute */
- if (afu->eb_len)
- device_remove_bin_file(&afu->dev, &afu->attr_eb);
-
- for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
- dev_attr = &afu_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_ATTRS))
- device_remove_file(&afu->dev, &afu_attrs[i]);
- }
-
- list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
- sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
- kobject_put(&cr->kobj);
- }
-}
-
-int cxl_sysfs_afu_add(struct cxl_afu *afu)
-{
- struct device_attribute *dev_attr;
- struct afu_config_record *cr;
- int i, rc;
-
- INIT_LIST_HEAD(&afu->crs);
-
- for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
- dev_attr = &afu_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_ATTRS)) {
- if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
- goto err;
- }
- }
-
- /* conditionally create the add the binary file for error info buffer */
- if (afu->eb_len) {
- sysfs_attr_init(&afu->attr_eb.attr);
-
- afu->attr_eb.attr.name = "afu_err_buff";
- afu->attr_eb.attr.mode = S_IRUGO;
- afu->attr_eb.size = afu->eb_len;
- afu->attr_eb.read_new = afu_eb_read;
-
- rc = device_create_bin_file(&afu->dev, &afu->attr_eb);
- if (rc) {
- dev_err(&afu->dev,
- "Unable to create eb attr for the afu. Err(%d)\n",
- rc);
- goto err;
- }
- }
-
- for (i = 0; i < afu->crs_num; i++) {
- cr = cxl_sysfs_afu_new_cr(afu, i);
- if (IS_ERR(cr)) {
- rc = PTR_ERR(cr);
- goto err1;
- }
- list_add(&cr->list, &afu->crs);
- }
-
- return 0;
-
-err1:
- cxl_sysfs_afu_remove(afu);
- return rc;
-err:
- /* reset the eb_len as we havent created the bin attr */
- afu->eb_len = 0;
-
- for (i--; i >= 0; i--) {
- dev_attr = &afu_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_ATTRS))
- device_remove_file(&afu->dev, &afu_attrs[i]);
- }
- return rc;
-}
-
-int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
-{
- struct device_attribute *dev_attr;
- int i, rc;
-
- for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
- dev_attr = &afu_master_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_MASTER_ATTRS)) {
- if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
- goto err;
- }
- }
-
- return 0;
-
-err:
- for (i--; i >= 0; i--) {
- dev_attr = &afu_master_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_MASTER_ATTRS))
- device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
- }
- return rc;
-}
-
-void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
-{
- struct device_attribute *dev_attr;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
- dev_attr = &afu_master_attrs[i];
- if (cxl_ops->support_attributes(dev_attr->attr.name,
- CXL_AFU_MASTER_ATTRS))
- device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
- }
-}
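
For readers unfamiliar with the sysfs pattern the deleted file relied on, the sketch below (not part of the patch; names such as foo_read() and foo_add_config() are hypothetical) shows the minimal kobject-plus-binary-attribute registration it followed: initialize the attribute, set its name, mode, size and read callback, then create the bin file under a kobject.

#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static ssize_t foo_read(struct file *filp, struct kobject *kobj,
			const struct bin_attribute *attr, char *buf,
			loff_t off, size_t count)
{
	/* A real driver would copy device-specific state into buf here. */
	memset(buf, 0, count);
	return count;
}

static int foo_add_config(struct kobject *kobj, struct bin_attribute *battr,
			  size_t size)
{
	sysfs_bin_attr_init(battr);
	battr->attr.name = "config";
	battr->attr.mode = S_IRUSR;	/* root read-only, as in the code above */
	battr->size = size;
	battr->read_new = foo_read;	/* const-bin_attribute callback variant */

	return sysfs_create_bin_file(kobj, battr);
}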
diff --git a/drivers/misc/cxl/trace.c b/drivers/misc/cxl/trace.c
deleted file mode 100644
index 86f654b99efb..000000000000
--- a/drivers/misc/cxl/trace.c
+++ /dev/null
@@ -1,9 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#ifndef __CHECKER__
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-#endif
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
deleted file mode 100644
index c474157c6857..000000000000
--- a/drivers/misc/cxl/trace.h
+++ /dev/null
@@ -1,691 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM cxl
-
-#if !defined(_CXL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _CXL_TRACE_H
-
-#include <linux/tracepoint.h>
-
-#include "cxl.h"
-
-#define dsisr_psl9_flags(flags) \
- __print_flags(flags, "|", \
- { CXL_PSL9_DSISR_An_CO_MASK, "FR" }, \
- { CXL_PSL9_DSISR_An_TF, "TF" }, \
- { CXL_PSL9_DSISR_An_PE, "PE" }, \
- { CXL_PSL9_DSISR_An_AE, "AE" }, \
- { CXL_PSL9_DSISR_An_OC, "OC" }, \
- { CXL_PSL9_DSISR_An_S, "S" })
-
-#define DSISR_FLAGS \
- { CXL_PSL_DSISR_An_DS, "DS" }, \
- { CXL_PSL_DSISR_An_DM, "DM" }, \
- { CXL_PSL_DSISR_An_ST, "ST" }, \
- { CXL_PSL_DSISR_An_UR, "UR" }, \
- { CXL_PSL_DSISR_An_PE, "PE" }, \
- { CXL_PSL_DSISR_An_AE, "AE" }, \
- { CXL_PSL_DSISR_An_OC, "OC" }, \
- { CXL_PSL_DSISR_An_M, "M" }, \
- { CXL_PSL_DSISR_An_P, "P" }, \
- { CXL_PSL_DSISR_An_A, "A" }, \
- { CXL_PSL_DSISR_An_S, "S" }, \
- { CXL_PSL_DSISR_An_K, "K" }
-
-#define TFC_FLAGS \
- { CXL_PSL_TFC_An_A, "A" }, \
- { CXL_PSL_TFC_An_C, "C" }, \
- { CXL_PSL_TFC_An_AE, "AE" }, \
- { CXL_PSL_TFC_An_R, "R" }
-
-#define LLCMD_NAMES \
- { CXL_SPA_SW_CMD_TERMINATE, "TERMINATE" }, \
- { CXL_SPA_SW_CMD_REMOVE, "REMOVE" }, \
- { CXL_SPA_SW_CMD_SUSPEND, "SUSPEND" }, \
- { CXL_SPA_SW_CMD_RESUME, "RESUME" }, \
- { CXL_SPA_SW_CMD_ADD, "ADD" }, \
- { CXL_SPA_SW_CMD_UPDATE, "UPDATE" }
-
-#define AFU_COMMANDS \
- { 0, "DISABLE" }, \
- { CXL_AFU_Cntl_An_E, "ENABLE" }, \
- { CXL_AFU_Cntl_An_RA, "RESET" }
-
-#define PSL_COMMANDS \
- { CXL_PSL_SCNTL_An_Pc, "PURGE" }, \
- { CXL_PSL_SCNTL_An_Sc, "SUSPEND" }
-
-
-DECLARE_EVENT_CLASS(cxl_pe_class,
- TP_PROTO(struct cxl_context *ctx),
-
- TP_ARGS(ctx),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- ),
-
- TP_printk("afu%i.%i pe=%i",
- __entry->card,
- __entry->afu,
- __entry->pe
- )
-);
-
-
-TRACE_EVENT(cxl_attach,
- TP_PROTO(struct cxl_context *ctx, u64 wed, s16 num_interrupts, u64 amr),
-
- TP_ARGS(ctx, wed, num_interrupts, amr),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(pid_t, pid)
- __field(u64, wed)
- __field(u64, amr)
- __field(s16, num_interrupts)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->pid = pid_nr(ctx->pid);
- __entry->wed = wed;
- __entry->amr = amr;
- __entry->num_interrupts = num_interrupts;
- ),
-
- TP_printk("afu%i.%i pid=%i pe=%i wed=0x%016llx irqs=%i amr=0x%llx",
- __entry->card,
- __entry->afu,
- __entry->pid,
- __entry->pe,
- __entry->wed,
- __entry->num_interrupts,
- __entry->amr
- )
-);
-
-DEFINE_EVENT(cxl_pe_class, cxl_detach,
- TP_PROTO(struct cxl_context *ctx),
- TP_ARGS(ctx)
-);
-
-TRACE_EVENT(cxl_afu_irq,
- TP_PROTO(struct cxl_context *ctx, int afu_irq, int virq, irq_hw_number_t hwirq),
-
- TP_ARGS(ctx, afu_irq, virq, hwirq),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u16, afu_irq)
- __field(int, virq)
- __field(irq_hw_number_t, hwirq)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->afu_irq = afu_irq;
- __entry->virq = virq;
- __entry->hwirq = hwirq;
- ),
-
- TP_printk("afu%i.%i pe=%i afu_irq=%i virq=%i hwirq=0x%lx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __entry->afu_irq,
- __entry->virq,
- __entry->hwirq
- )
-);
-
-TRACE_EVENT(cxl_psl9_irq,
- TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),
-
- TP_ARGS(ctx, irq, dsisr, dar),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(int, irq)
- __field(u64, dsisr)
- __field(u64, dar)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->irq = irq;
- __entry->dsisr = dsisr;
- __entry->dar = dar;
- ),
-
- TP_printk("afu%i.%i pe=%i irq=%i dsisr=0x%016llx dsisr=%s dar=0x%016llx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __entry->irq,
- __entry->dsisr,
- dsisr_psl9_flags(__entry->dsisr),
- __entry->dar
- )
-);
-
-TRACE_EVENT(cxl_psl_irq,
- TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),
-
- TP_ARGS(ctx, irq, dsisr, dar),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(int, irq)
- __field(u64, dsisr)
- __field(u64, dar)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->irq = irq;
- __entry->dsisr = dsisr;
- __entry->dar = dar;
- ),
-
- TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%016llx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __entry->irq,
- __print_flags(__entry->dsisr, "|", DSISR_FLAGS),
- __entry->dar
- )
-);
-
-TRACE_EVENT(cxl_psl_irq_ack,
- TP_PROTO(struct cxl_context *ctx, u64 tfc),
-
- TP_ARGS(ctx, tfc),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u64, tfc)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->tfc = tfc;
- ),
-
- TP_printk("afu%i.%i pe=%i tfc=%s",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __print_flags(__entry->tfc, "|", TFC_FLAGS)
- )
-);
-
-TRACE_EVENT(cxl_ste_miss,
- TP_PROTO(struct cxl_context *ctx, u64 dar),
-
- TP_ARGS(ctx, dar),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u64, dar)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->dar = dar;
- ),
-
- TP_printk("afu%i.%i pe=%i dar=0x%016llx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __entry->dar
- )
-);
-
-TRACE_EVENT(cxl_ste_write,
- TP_PROTO(struct cxl_context *ctx, unsigned int idx, u64 e, u64 v),
-
- TP_ARGS(ctx, idx, e, v),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(unsigned int, idx)
- __field(u64, e)
- __field(u64, v)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->idx = idx;
- __entry->e = e;
- __entry->v = v;
- ),
-
- TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%016llx V=0x%016llx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __entry->idx,
- __entry->e,
- __entry->v
- )
-);
-
-TRACE_EVENT(cxl_pte_miss,
- TP_PROTO(struct cxl_context *ctx, u64 dsisr, u64 dar),
-
- TP_ARGS(ctx, dsisr, dar),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u64, dsisr)
- __field(u64, dar)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->dsisr = dsisr;
- __entry->dar = dar;
- ),
-
- TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%016llx",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __print_flags(__entry->dsisr, "|", DSISR_FLAGS),
- __entry->dar
- )
-);
-
-TRACE_EVENT(cxl_llcmd,
- TP_PROTO(struct cxl_context *ctx, u64 cmd),
-
- TP_ARGS(ctx, cmd),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u64, cmd)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->cmd = cmd;
- ),
-
- TP_printk("afu%i.%i pe=%i cmd=%s",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __print_symbolic_u64(__entry->cmd, LLCMD_NAMES)
- )
-);
-
-TRACE_EVENT(cxl_llcmd_done,
- TP_PROTO(struct cxl_context *ctx, u64 cmd, int rc),
-
- TP_ARGS(ctx, cmd, rc),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u16, pe)
- __field(u64, cmd)
- __field(int, rc)
- ),
-
- TP_fast_assign(
- __entry->card = ctx->afu->adapter->adapter_num;
- __entry->afu = ctx->afu->slice;
- __entry->pe = ctx->pe;
- __entry->rc = rc;
- __entry->cmd = cmd;
- ),
-
- TP_printk("afu%i.%i pe=%i cmd=%s rc=%i",
- __entry->card,
- __entry->afu,
- __entry->pe,
- __print_symbolic_u64(__entry->cmd, LLCMD_NAMES),
- __entry->rc
- )
-);
-
-DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl,
- TP_PROTO(struct cxl_afu *afu, u64 cmd),
-
- TP_ARGS(afu, cmd),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u64, cmd)
- ),
-
- TP_fast_assign(
- __entry->card = afu->adapter->adapter_num;
- __entry->afu = afu->slice;
- __entry->cmd = cmd;
- ),
-
- TP_printk("afu%i.%i cmd=%s",
- __entry->card,
- __entry->afu,
- __print_symbolic_u64(__entry->cmd, AFU_COMMANDS)
- )
-);
-
-DECLARE_EVENT_CLASS(cxl_afu_psl_ctrl_done,
- TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
-
- TP_ARGS(afu, cmd, rc),
-
- TP_STRUCT__entry(
- __field(u8, card)
- __field(u8, afu)
- __field(u64, cmd)
- __field(int, rc)
- ),
-
- TP_fast_assign(
- __entry->card = afu->adapter->adapter_num;
- __entry->afu = afu->slice;
- __entry->rc = rc;
- __entry->cmd = cmd;
- ),
-
- TP_printk("afu%i.%i cmd=%s rc=%i",
- __entry->card,
- __entry->afu,
- __print_symbolic_u64(__entry->cmd, AFU_COMMANDS),
- __entry->rc
- )
-);
-
-DEFINE_EVENT(cxl_afu_psl_ctrl, cxl_afu_ctrl,
- TP_PROTO(struct cxl_afu *afu, u64 cmd),
- TP_ARGS(afu, cmd)
-);
-
-DEFINE_EVENT(cxl_afu_psl_ctrl_done, cxl_afu_ctrl_done,
- TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
- TP_ARGS(afu, cmd, rc)
-);
-
-DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl, cxl_psl_ctrl,
- TP_PROTO(struct cxl_afu *afu, u64 cmd),
- TP_ARGS(afu, cmd),
-
- TP_printk("psl%i.%i cmd=%s",
- __entry->card,
- __entry->afu,
- __print_symbolic_u64(__entry->cmd, PSL_COMMANDS)
- )
-);
-
-DEFINE_EVENT_PRINT(cxl_afu_psl_ctrl_done, cxl_psl_ctrl_done,
- TP_PROTO(struct cxl_afu *afu, u64 cmd, int rc),
- TP_ARGS(afu, cmd, rc),
-
- TP_printk("psl%i.%i cmd=%s rc=%i",
- __entry->card,
- __entry->afu,
- __print_symbolic_u64(__entry->cmd, PSL_COMMANDS),
- __entry->rc
- )
-);
-
-DEFINE_EVENT(cxl_pe_class, cxl_slbia,
- TP_PROTO(struct cxl_context *ctx),
- TP_ARGS(ctx)
-);
-
-TRACE_EVENT(cxl_hcall,
- TP_PROTO(u64 unit_address, u64 process_token, long rc),
-
- TP_ARGS(unit_address, process_token, rc),
-
- TP_STRUCT__entry(
- __field(u64, unit_address)
- __field(u64, process_token)
- __field(long, rc)
- ),
-
- TP_fast_assign(
- __entry->unit_address = unit_address;
- __entry->process_token = process_token;
- __entry->rc = rc;
- ),
-
- TP_printk("unit_address=0x%016llx process_token=0x%016llx rc=%li",
- __entry->unit_address,
- __entry->process_token,
- __entry->rc
- )
-);
-
-TRACE_EVENT(cxl_hcall_control,
- TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
- u64 p4, unsigned long r4, long rc),
-
- TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc),
-
- TP_STRUCT__entry(
- __field(u64, unit_address)
- __field(char *, fct)
- __field(u64, p1)
- __field(u64, p2)
- __field(u64, p3)
- __field(u64, p4)
- __field(unsigned long, r4)
- __field(long, rc)
- ),
-
- TP_fast_assign(
- __entry->unit_address = unit_address;
- __entry->fct = fct;
- __entry->p1 = p1;
- __entry->p2 = p2;
- __entry->p3 = p3;
- __entry->p4 = p4;
- __entry->r4 = r4;
- __entry->rc = rc;
- ),
-
- TP_printk("unit_address=%#.16llx %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li",
- __entry->unit_address,
- __entry->fct,
- __entry->p1,
- __entry->p2,
- __entry->p3,
- __entry->p4,
- __entry->r4,
- __entry->rc
- )
-);
-
-TRACE_EVENT(cxl_hcall_attach,
- TP_PROTO(u64 unit_address, u64 phys_addr, unsigned long process_token,
- unsigned long mmio_addr, unsigned long mmio_size, long rc),
-
- TP_ARGS(unit_address, phys_addr, process_token,
- mmio_addr, mmio_size, rc),
-
- TP_STRUCT__entry(
- __field(u64, unit_address)
- __field(u64, phys_addr)
- __field(unsigned long, process_token)
- __field(unsigned long, mmio_addr)
- __field(unsigned long, mmio_size)
- __field(long, rc)
- ),
-
- TP_fast_assign(
- __entry->unit_address = unit_address;
- __entry->phys_addr = phys_addr;
- __entry->process_token = process_token;
- __entry->mmio_addr = mmio_addr;
- __entry->mmio_size = mmio_size;
- __entry->rc = rc;
- ),
-
- TP_printk("unit_address=0x%016llx phys_addr=0x%016llx "
- "token=0x%.8lx mmio_addr=0x%lx mmio_size=0x%lx rc=%li",
- __entry->unit_address,
- __entry->phys_addr,
- __entry->process_token,
- __entry->mmio_addr,
- __entry->mmio_size,
- __entry->rc
- )
-);
-
-DEFINE_EVENT(cxl_hcall, cxl_hcall_detach,
- TP_PROTO(u64 unit_address, u64 process_token, long rc),
- TP_ARGS(unit_address, process_token, rc)
-);
-
-DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_function,
- TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
- u64 p4, unsigned long r4, long rc),
- TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc)
-);
-
-DEFINE_EVENT(cxl_hcall, cxl_hcall_collect_int_info,
- TP_PROTO(u64 unit_address, u64 process_token, long rc),
- TP_ARGS(unit_address, process_token, rc)
-);
-
-TRACE_EVENT(cxl_hcall_control_faults,
- TP_PROTO(u64 unit_address, u64 process_token,
- u64 control_mask, u64 reset_mask, unsigned long r4,
- long rc),
-
- TP_ARGS(unit_address, process_token,
- control_mask, reset_mask, r4, rc),
-
- TP_STRUCT__entry(
- __field(u64, unit_address)
- __field(u64, process_token)
- __field(u64, control_mask)
- __field(u64, reset_mask)
- __field(unsigned long, r4)
- __field(long, rc)
- ),
-
- TP_fast_assign(
- __entry->unit_address = unit_address;
- __entry->process_token = process_token;
- __entry->control_mask = control_mask;
- __entry->reset_mask = reset_mask;
- __entry->r4 = r4;
- __entry->rc = rc;
- ),
-
- TP_printk("unit_address=0x%016llx process_token=0x%llx "
- "control_mask=%#llx reset_mask=%#llx r4=%#lx rc=%li",
- __entry->unit_address,
- __entry->process_token,
- __entry->control_mask,
- __entry->reset_mask,
- __entry->r4,
- __entry->rc
- )
-);
-
-DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_facility,
- TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
- u64 p4, unsigned long r4, long rc),
- TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc)
-);
-
-TRACE_EVENT(cxl_hcall_download_facility,
- TP_PROTO(u64 unit_address, char *fct, u64 list_address, u64 num,
- unsigned long r4, long rc),
-
- TP_ARGS(unit_address, fct, list_address, num, r4, rc),
-
- TP_STRUCT__entry(
- __field(u64, unit_address)
- __field(char *, fct)
- __field(u64, list_address)
- __field(u64, num)
- __field(unsigned long, r4)
- __field(long, rc)
- ),
-
- TP_fast_assign(
- __entry->unit_address = unit_address;
- __entry->fct = fct;
- __entry->list_address = list_address;
- __entry->num = num;
- __entry->r4 = r4;
- __entry->rc = rc;
- ),
-
- TP_printk("%#.16llx, %s(%#llx, %#llx), %#lx): %li",
- __entry->unit_address,
- __entry->fct,
- __entry->list_address,
- __entry->num,
- __entry->r4,
- __entry->rc
- )
-);
-
-#endif /* _CXL_TRACE_H */
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
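
As a reference for the boilerplate being removed, here is a minimal, hypothetical tracepoint header using the same TRACE_EVENT() machinery (sample names only; this is not code from the patch). One .c file must define CREATE_TRACE_POINTS before including the header, as the deleted trace.c did.

/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_SAMPLE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _SAMPLE_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(sample_event,
	TP_PROTO(int id, u64 value),

	TP_ARGS(id, value),

	TP_STRUCT__entry(
		__field(int, id)
		__field(u64, value)
	),

	TP_fast_assign(
		__entry->id = id;
		__entry->value = value;
	),

	TP_printk("id=%d value=0x%llx", __entry->id, __entry->value)
);

#endif /* _SAMPLE_TRACE_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE sample_trace
#include <trace/define_trace.h>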
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
deleted file mode 100644
index 6332db8044bd..000000000000
--- a/drivers/misc/cxl/vphb.c
+++ /dev/null
@@ -1,309 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#include <linux/pci.h>
-#include <misc/cxl.h>
-#include "cxl.h"
-
-static int cxl_pci_probe_mode(struct pci_bus *bus)
-{
- return PCI_PROBE_NORMAL;
-}
-
-static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
- return -ENODEV;
-}
-
-static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
-{
- /*
- * MSI should never be set but need still need to provide this call
- * back.
- */
-}
-
-static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
-{
- struct pci_controller *phb;
- struct cxl_afu *afu;
- struct cxl_context *ctx;
-
- phb = pci_bus_to_host(dev->bus);
- afu = (struct cxl_afu *)phb->private_data;
-
- if (!cxl_ops->link_ok(afu->adapter, afu)) {
- dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
- return false;
- }
-
- dev->dev.archdata.dma_offset = PAGE_OFFSET;
-
- /*
- * Allocate a context to do cxl things too. If we eventually do real
- * DMA ops, we'll need a default context to attach them to
- */
- ctx = cxl_dev_context_init(dev);
- if (IS_ERR(ctx))
- return false;
- dev->dev.archdata.cxl_ctx = ctx;
-
- return (cxl_ops->afu_check_and_enable(afu) == 0);
-}
-
-static void cxl_pci_disable_device(struct pci_dev *dev)
-{
- struct cxl_context *ctx = cxl_get_context(dev);
-
- if (ctx) {
- if (ctx->status == STARTED) {
- dev_err(&dev->dev, "Default context started\n");
- return;
- }
- dev->dev.archdata.cxl_ctx = NULL;
- cxl_release_context(ctx);
- }
-}
-
-static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
-{
- /* Should we do an AFU reset here ? */
-}
-
-static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
-{
- return (bus << 8) + devfn;
-}
-
-static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
-{
- struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;
-
- return phb ? phb->private_data : NULL;
-}
-
-static void cxl_afu_configured_put(struct cxl_afu *afu)
-{
- atomic_dec_if_positive(&afu->configured_state);
-}
-
-static bool cxl_afu_configured_get(struct cxl_afu *afu)
-{
- return atomic_inc_unless_negative(&afu->configured_state);
-}
-
-static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
- struct cxl_afu *afu, int *_record)
-{
- int record;
-
- record = cxl_pcie_cfg_record(bus->number, devfn);
- if (record > afu->crs_num)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- *_record = record;
- return 0;
-}
-
-static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 *val)
-{
- int rc, record;
- struct cxl_afu *afu;
- u8 val8;
- u16 val16;
- u32 val32;
-
- afu = pci_bus_to_afu(bus);
- /* Grab a reader lock on afu. */
- if (afu == NULL || !cxl_afu_configured_get(afu))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- rc = cxl_pcie_config_info(bus, devfn, afu, &record);
- if (rc)
- goto out;
-
- switch (len) {
- case 1:
- rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
- *val = val8;
- break;
- case 2:
- rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
- *val = val16;
- break;
- case 4:
- rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
- *val = val32;
- break;
- default:
- WARN_ON(1);
- }
-
-out:
- cxl_afu_configured_put(afu);
- return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
-}
-
-static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
- int offset, int len, u32 val)
-{
- int rc, record;
- struct cxl_afu *afu;
-
- afu = pci_bus_to_afu(bus);
- /* Grab a reader lock on afu. */
- if (afu == NULL || !cxl_afu_configured_get(afu))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- rc = cxl_pcie_config_info(bus, devfn, afu, &record);
- if (rc)
- goto out;
-
- switch (len) {
- case 1:
- rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
- break;
- case 2:
- rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
- break;
- case 4:
- rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
- break;
- default:
- WARN_ON(1);
- }
-
-out:
- cxl_afu_configured_put(afu);
- return rc ? PCIBIOS_SET_FAILED : 0;
-}
-
-static struct pci_ops cxl_pcie_pci_ops =
-{
- .read = cxl_pcie_read_config,
- .write = cxl_pcie_write_config,
-};
-
-
-static struct pci_controller_ops cxl_pci_controller_ops =
-{
- .probe_mode = cxl_pci_probe_mode,
- .enable_device_hook = cxl_pci_enable_device_hook,
- .disable_device = cxl_pci_disable_device,
- .release_device = cxl_pci_disable_device,
- .reset_secondary_bus = cxl_pci_reset_secondary_bus,
- .setup_msi_irqs = cxl_setup_msi_irqs,
- .teardown_msi_irqs = cxl_teardown_msi_irqs,
-};
-
-int cxl_pci_vphb_add(struct cxl_afu *afu)
-{
- struct pci_controller *phb;
- struct device_node *vphb_dn;
- struct device *parent;
-
- /*
- * If there are no AFU configuration records we won't have anything to
- * expose under the vPHB, so skip creating one, returning success since
- * this is still a valid case. This will also opt us out of EEH
- * handling since we won't have anything special to do if there are no
- * kernel drivers attached to the vPHB, and EEH handling is not yet
- * supported in the peer model.
- */
- if (!afu->crs_num)
- return 0;
-
- /* The parent device is the adapter. Reuse the device node of
- * the adapter.
- * We don't seem to care what device node is used for the vPHB,
- * but tools such as lsvpd walk up the device parents looking
- * for a valid location code, so we might as well show devices
- * attached to the adapter as being located on that adapter.
- */
- parent = afu->adapter->dev.parent;
- vphb_dn = parent->of_node;
-
- /* Alloc and setup PHB data structure */
- phb = pcibios_alloc_controller(vphb_dn);
- if (!phb)
- return -ENODEV;
-
- /* Setup parent in sysfs */
- phb->parent = parent;
-
- /* Setup the PHB using arch provided callback */
- phb->ops = &cxl_pcie_pci_ops;
- phb->cfg_addr = NULL;
- phb->cfg_data = NULL;
- phb->private_data = afu;
- phb->controller_ops = cxl_pci_controller_ops;
-
- /* Scan the bus */
- pcibios_scan_phb(phb);
- if (phb->bus == NULL)
- return -ENXIO;
-
- /* Set release hook on root bus */
- pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
- pcibios_free_controller_deferred,
- (void *) phb);
-
- /* Claim resources. This might need some rework as well depending
- * whether we are doing probe-only or not, like assigning unassigned
- * resources etc...
- */
- pcibios_claim_one_bus(phb->bus);
-
- /* Add probed PCI devices to the device model */
- pci_bus_add_devices(phb->bus);
-
- afu->phb = phb;
-
- return 0;
-}
-
-void cxl_pci_vphb_remove(struct cxl_afu *afu)
-{
- struct pci_controller *phb;
-
- /* If there is no configuration record we won't have one of these */
- if (!afu || !afu->phb)
- return;
-
- phb = afu->phb;
- afu->phb = NULL;
-
- pci_remove_root_bus(phb->bus);
- /*
- * We don't free phb here - that's handled by
- * pcibios_free_controller_deferred()
- */
-}
-
-bool cxl_pci_is_vphb_device(struct pci_dev *dev)
-{
- struct pci_controller *phb;
-
- phb = pci_bus_to_host(dev->bus);
-
- return (phb->ops == &cxl_pcie_pci_ops);
-}
-
-struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
-{
- struct pci_controller *phb;
-
- phb = pci_bus_to_host(dev->bus);
-
- return (struct cxl_afu *)phb->private_data;
-}
-EXPORT_SYMBOL_GPL(cxl_pci_to_afu);
-
-unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
-{
- return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
-}
-EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);
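
The deleted vphb.c gated its config-space accessors on a "configured" counter via atomic_inc_unless_negative()/atomic_dec_if_positive(). A stripped-down sketch of that gate with hypothetical names; the teardown side is assumed to park the counter below zero once readers have drained, so new readers fail cleanly.

#include <linux/atomic.h>

static atomic_t configured_state = ATOMIC_INIT(0);

/* Reader side: take a reference only while the object is still configured.
 * Returns false once teardown has parked the counter at a negative value. */
static bool sample_configured_get(void)
{
	return atomic_inc_unless_negative(&configured_state);
}

static void sample_configured_put(void)
{
	atomic_dec_if_positive(&configured_state);
}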
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 5b861dbff27e..6c24426104ba 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -29,6 +29,13 @@ static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
/*
+ * This is a pointer to do_nothing() which is initialized at runtime rather
+ * than build time to avoid objtool IBT validation warnings caused by an
+ * inlined unrolled memcpy() in execute_location().
+ */
+static void __ro_after_init *do_nothing_ptr;
+
+/*
* This just returns to the caller. It is designed to be copied into
* non-executable memory regions.
*/
@@ -65,13 +72,12 @@ static noinline __nocfi void execute_location(void *dst, bool write)
{
void (*func)(void);
func_desc_t fdesc;
- void *do_nothing_text = dereference_function_descriptor(do_nothing);
- pr_info("attempting ok execution at %px\n", do_nothing_text);
+ pr_info("attempting ok execution at %px\n", do_nothing_ptr);
do_nothing();
if (write == CODE_WRITE) {
- memcpy(dst, do_nothing_text, EXEC_SIZE);
+ memcpy(dst, do_nothing_ptr, EXEC_SIZE);
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
@@ -267,6 +273,8 @@ static void lkdtm_ACCESS_NULL(void)
void __init lkdtm_perms_init(void)
{
+ do_nothing_ptr = dereference_function_descriptor(do_nothing);
+
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
}
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 90ea3dc0fb10..c398ac42eae9 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -330,7 +330,7 @@ static int ldisc_open(struct tty_struct *tty)
ser->tty = tty_kref_get(tty);
ser->dev = dev;
debugfs_init(ser, tty);
- tty->receive_room = N_TTY_BUF_SIZE;
+ tty->receive_room = 4096;
tty->disc_data = ser;
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
rtnl_lock();
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 9e84ab411564..51614651d2e7 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -56,17 +56,6 @@ bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
return true;
}
-bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
- struct nd_namespace_common **_ndns)
-{
- bool claimed;
-
- nvdimm_bus_lock(&attach->dev);
- claimed = __nd_attach_ndns(dev, attach, _ndns);
- nvdimm_bus_unlock(&attach->dev);
- return claimed;
-}
-
static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 082253a3a956..04f4a049599a 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -442,7 +442,8 @@ int nd_label_data_init(struct nvdimm_drvdata *ndd)
if (ndd->data)
return 0;
- if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
+ if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 ||
+ ndd->nsarea.config_size == 0) {
dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
ndd->nsarea.max_xfer, ndd->nsarea.config_size);
return -ENXIO;
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 86976a9e8a15..bfc6bfeb6e24 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -127,8 +127,6 @@ resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
-int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
- resource_size_t size);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id);
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
@@ -136,8 +134,6 @@ void get_ndd(struct nvdimm_drvdata *ndd);
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
-bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
- struct nd_namespace_common **_ndns);
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
struct nd_namespace_common **_ndns);
ssize_t nd_namespace_store(struct device *dev,
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 37417ce5ec7b..de1ee5ebc851 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1229,45 +1229,4 @@ bool is_nvdimm_sync(struct nd_region *nd_region)
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);
-struct conflict_context {
- struct nd_region *nd_region;
- resource_size_t start, size;
-};
-
-static int region_conflict(struct device *dev, void *data)
-{
- struct nd_region *nd_region;
- struct conflict_context *ctx = data;
- resource_size_t res_end, region_end, region_start;
-
- if (!is_memory(dev))
- return 0;
-
- nd_region = to_nd_region(dev);
- if (nd_region == ctx->nd_region)
- return 0;
-
- res_end = ctx->start + ctx->size;
- region_start = nd_region->ndr_start;
- region_end = region_start + nd_region->ndr_size;
- if (ctx->start >= region_start && ctx->start < region_end)
- return -EBUSY;
- if (res_end > region_start && res_end <= region_end)
- return -EBUSY;
- return 0;
-}
-
-int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
- resource_size_t size)
-{
- struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
- struct conflict_context ctx = {
- .nd_region = nd_region,
- .start = start,
- .size = size,
- };
-
- return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
-}
-
MODULE_IMPORT_NS("DEVMEM");
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 10e453b2436e..d47dfa80fb95 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -18,10 +18,15 @@ config NVME_MULTIPATH
bool "NVMe multipath support"
depends on NVME_CORE
help
- This option enables support for multipath access to NVMe
- subsystems. If this option is enabled only a single
- /dev/nvmeXnY device will show up for each NVMe namespace,
- even if it is accessible through multiple controllers.
+ This option controls support for multipath access to NVMe
+ subsystems. If this option is enabled support for NVMe multipath
+ access is included in the kernel. If this option is disabled support
+ for NVMe multipath access is excluded from the kernel. When this
+ option is disabled each controller/namespace receives its
+ own /dev/nvmeXnY device entry and NVMe multipath access is
+ not supported.
+
+ If unsure, say Y.
config NVME_VERBOSE_ERRORS
bool "NVMe verbose error reporting"
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c2d89fac86c5..cc23035148b4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3822,7 +3822,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
"Found shared namespace %d, but multipathing not supported.\n",
info->nsid);
dev_warn_once(ctrl->device,
- "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
+ "Shared namespace support requires core_nvme.multipath=Y.\n");
}
}
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index ecf136489044..ca86d3bf7ea4 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -114,8 +114,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
static int nvme_map_user_request(struct request *req, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- struct io_uring_cmd *ioucmd, unsigned int flags,
- unsigned int iou_issue_flags)
+ struct iov_iter *iter, unsigned int flags)
{
struct request_queue *q = req->q;
struct nvme_ns *ns = q->queuedata;
@@ -129,37 +128,23 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (!nvme_ctrl_sgl_supported(ctrl))
dev_warn_once(ctrl->device, "using unchecked data buffer\n");
if (has_metadata) {
- if (!supports_metadata) {
- ret = -EINVAL;
- goto out;
- }
+ if (!supports_metadata)
+ return -EINVAL;
+
if (!nvme_ctrl_meta_sgl_supported(ctrl))
dev_warn_once(ctrl->device,
"using unchecked metadata buffer\n");
}
- if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
- struct iov_iter iter;
-
- /* fixedbufs is only for non-vectored io */
- if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
- ret = -EINVAL;
- goto out;
- }
- ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
- rq_data_dir(req), &iter, ioucmd,
- iou_issue_flags);
- if (ret < 0)
- goto out;
- ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
- } else {
+ if (iter)
+ ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL);
+ else
ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
0, rq_data_dir(req));
- }
if (ret)
- goto out;
+ return ret;
bio = req->bio;
if (bdev)
@@ -176,8 +161,6 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
out_unmap:
if (bio)
blk_rq_unmap_user(bio);
-out:
- blk_mq_free_request(req);
return ret;
}
@@ -200,9 +183,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
req->timeout = timeout;
if (ubuffer && bufflen) {
ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
- meta_len, NULL, flags, 0);
+ meta_len, NULL, flags);
if (ret)
- return ret;
+ goto out_free_req;
}
bio = req->bio;
@@ -218,7 +201,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (effects)
nvme_passthru_end(ctrl, ns, effects, cmd, ret);
+ return ret;
+out_free_req:
+ blk_mq_free_request(req);
return ret;
}
@@ -469,6 +455,8 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
struct nvme_uring_data d;
struct nvme_command c;
+ struct iov_iter iter;
+ struct iov_iter *map_iter = NULL;
struct request *req;
blk_opf_t rq_flags = REQ_ALLOC_CACHE;
blk_mq_req_flags_t blk_flags = 0;
@@ -504,6 +492,20 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
d.metadata_len = READ_ONCE(cmd->metadata_len);
d.timeout_ms = READ_ONCE(cmd->timeout_ms);
+ if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+ /* fixedbufs is only for non-vectored io */
+ if (vec)
+ return -EINVAL;
+
+ ret = io_uring_cmd_import_fixed(d.addr, d.data_len,
+ nvme_is_write(&c) ? WRITE : READ, &iter, ioucmd,
+ issue_flags);
+ if (ret < 0)
+ return ret;
+
+ map_iter = &iter;
+ }
+
if (issue_flags & IO_URING_F_NONBLOCK) {
rq_flags |= REQ_NOWAIT;
blk_flags = BLK_MQ_REQ_NOWAIT;
@@ -517,11 +519,11 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
if (d.data_len) {
- ret = nvme_map_user_request(req, d.addr,
- d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, ioucmd, vec, issue_flags);
+ ret = nvme_map_user_request(req, d.addr, d.data_len,
+ nvme_to_user_ptr(d.metadata), d.metadata_len,
+ map_iter, vec);
if (ret)
- return ret;
+ goto out_free_req;
}
/* to free bio on completion, as req->bio will be null at that time */
@@ -531,6 +533,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
req->end_io = nvme_uring_cmd_end_io;
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
+
+out_free_req:
+ blk_mq_free_request(req);
+ return ret;
}
static bool is_ctrl_ioctl(unsigned int cmd)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2883d17ee1eb..b178d52eac1b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -986,6 +986,9 @@ static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
{
struct request *req;
+ if (rq_list_empty(rqlist))
+ return;
+
spin_lock(&nvmeq->sq_lock);
while ((req = rq_list_pop(rqlist))) {
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c
index e4300eb95101..5dcbd5aa86e1 100644
--- a/drivers/nvme/target/debugfs.c
+++ b/drivers/nvme/target/debugfs.c
@@ -78,7 +78,7 @@ static int nvmet_ctrl_state_show(struct seq_file *m, void *p)
bool sep = false;
int i;
- for (i = 0; i < 7; i++) {
+ for (i = 0; i < ARRAY_SIZE(csts_state_names); i++) {
int state = BIT(i);
if (!(ctrl->csts & state))
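
The hunk above replaces a hard-coded bound with ARRAY_SIZE() so the loop always tracks the table it walks. A small, self-contained illustration of the idiom (the table and names are hypothetical, not the nvmet state table):

#include <linux/array_size.h>
#include <linux/bits.h>
#include <linux/printk.h>

static const char * const state_names[] = {
	"ready", "cfs", "shst_normal", "shst_occurring", "shst_complete",
};

static void print_states(unsigned long mask)
{
	size_t i;

	/* Bound derived from the array itself, so adding entries never overruns. */
	for (i = 0; i < ARRAY_SIZE(state_names); i++)
		if (mask & BIT(i))
			pr_cont("%s ", state_names[i]);
	pr_cont("\n");
}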
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index b54b3fdbe389..51c27b32248d 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1264,6 +1264,7 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
u16 status;
+ int ret;
if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
@@ -1298,6 +1299,24 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
if (status != NVME_SC_SUCCESS)
goto err;
+ /*
+ * Map the CQ PCI address space and since PCI endpoint controllers may
+ * return a partial mapping, check that the mapping is large enough.
+ */
+ ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size,
+ &cq->pci_map);
+ if (ret) {
+ dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n",
+ cq->qid, ret);
+ goto err_internal;
+ }
+
+ if (cq->pci_map.pci_size < cq->pci_size) {
+ dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
+ cq->qid);
+ goto err_unmap_queue;
+ }
+
set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
@@ -1305,6 +1324,10 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
return NVME_SC_SUCCESS;
+err_unmap_queue:
+ nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
+err_internal:
+ status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
err:
if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
@@ -1322,6 +1345,7 @@ static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
cancel_delayed_work_sync(&cq->work);
nvmet_pci_epf_drain_queue(cq);
nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
+ nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map);
return NVME_SC_SUCCESS;
}
@@ -1553,36 +1577,6 @@ static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl)
ctrl->cq = NULL;
}
-static int nvmet_pci_epf_map_queue(struct nvmet_pci_epf_ctrl *ctrl,
- struct nvmet_pci_epf_queue *queue)
-{
- struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
- int ret;
-
- ret = nvmet_pci_epf_mem_map(nvme_epf, queue->pci_addr,
- queue->pci_size, &queue->pci_map);
- if (ret) {
- dev_err(ctrl->dev, "Failed to map queue %u (err=%d)\n",
- queue->qid, ret);
- return ret;
- }
-
- if (queue->pci_map.pci_size < queue->pci_size) {
- dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
- queue->qid);
- nvmet_pci_epf_mem_unmap(nvme_epf, &queue->pci_map);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static inline void nvmet_pci_epf_unmap_queue(struct nvmet_pci_epf_ctrl *ctrl,
- struct nvmet_pci_epf_queue *queue)
-{
- nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &queue->pci_map);
-}
-
static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
{
struct nvmet_pci_epf_iod *iod =
@@ -1746,11 +1740,7 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
struct nvme_completion *cqe;
struct nvmet_pci_epf_iod *iod;
unsigned long flags;
- int ret, n = 0;
-
- ret = nvmet_pci_epf_map_queue(ctrl, cq);
- if (ret)
- goto again;
+ int ret = 0, n = 0;
while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) {
@@ -1797,8 +1787,6 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
n++;
}
- nvmet_pci_epf_unmap_queue(ctrl, cq);
-
/*
* We do not support precise IRQ coalescing time (100ns units as per
* NVMe specifications). So if we have posted completion entries without
@@ -1807,7 +1795,6 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
if (n)
nvmet_pci_epf_raise_irq(ctrl, cq, true);
-again:
if (ret < 0)
queue_delayed_work(system_highpri_wq, &cq->work,
NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index f6ba88baee4d..f42c85607a6b 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (C) 2021 Thomas Weißschuh <thomas@weissschuh.net>
+ * Copyright (C) 2021 Thomas Weißschuh <linux@weissschuh.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -159,6 +159,6 @@ static struct wmi_driver gigabyte_wmi_driver = {
module_wmi_driver(gigabyte_wmi_driver);
MODULE_DEVICE_TABLE(wmi, gigabyte_wmi_id_table);
-MODULE_AUTHOR("Thomas Weißschuh <thomas@weissschuh.net>");
+MODULE_AUTHOR("Thomas Weißschuh <linux@weissschuh.net>");
MODULE_DESCRIPTION("Gigabyte WMI temperature driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index dbcd3087aaa4..31239a93dd71 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -84,7 +84,7 @@ static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);
static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
- u32 data)
+ u64 data)
{
struct isst_cmd *sst_cmd;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 0384cf311878..5790095c175e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -367,6 +367,7 @@ static struct {
u32 beep_needs_two_args:1;
u32 mixer_no_level_control:1;
u32 battery_force_primary:1;
+ u32 platform_drv_registered:1;
u32 hotkey_poll_active:1;
u32 has_adaptive_kbd:1;
u32 kbd_lang:1;
@@ -8793,6 +8794,7 @@ static const struct attribute_group fan_driver_attr_group = {
#define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */
#define TPACPI_FAN_DECRPM 0x0020 /* For ECFW's with RPM in register as decimal */
#define TPACPI_FAN_TPR 0x0040 /* Fan speed is in Ticks Per Revolution */
+#define TPACPI_FAN_NOACPI 0x0080 /* Don't use ACPI methods even if detected */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
@@ -8823,6 +8825,9 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */
TPACPI_Q_LNV('8', 'F', TPACPI_FAN_TPR), /* ThinkPad x120e */
+ TPACPI_Q_LNV3('R', '0', '0', TPACPI_FAN_NOACPI),/* E560 */
+ TPACPI_Q_LNV3('R', '1', '2', TPACPI_FAN_NOACPI),/* T495 */
+ TPACPI_Q_LNV3('R', '1', '3', TPACPI_FAN_NOACPI),/* T495s */
};
static int __init fan_init(struct ibm_init_struct *iibm)
@@ -8874,6 +8879,13 @@ static int __init fan_init(struct ibm_init_struct *iibm)
tp_features.fan_ctrl_status_undef = 1;
}
+ if (quirks & TPACPI_FAN_NOACPI) {
+ /* E560, T495, T495s */
+ pr_info("Ignoring buggy ACPI fan access method\n");
+ fang_handle = NULL;
+ fanw_handle = NULL;
+ }
+
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
@@ -11820,10 +11832,10 @@ static void thinkpad_acpi_module_exit(void)
platform_device_unregister(tpacpi_sensors_pdev);
}
- if (tpacpi_pdev) {
+ if (tp_features.platform_drv_registered)
platform_driver_unregister(&tpacpi_pdriver);
+ if (tpacpi_pdev)
platform_device_unregister(tpacpi_pdev);
- }
if (proc_dir)
remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
@@ -11893,9 +11905,8 @@ static int __init tpacpi_pdriver_probe(struct platform_device *pdev)
static int __init tpacpi_hwmon_pdriver_probe(struct platform_device *pdev)
{
- tpacpi_hwmon = devm_hwmon_device_register_with_groups(
- &tpacpi_sensors_pdev->dev, TPACPI_NAME, NULL, tpacpi_hwmon_groups);
-
+ tpacpi_hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, TPACPI_NAME,
+ NULL, tpacpi_hwmon_groups);
if (IS_ERR(tpacpi_hwmon))
pr_err("unable to register hwmon device\n");
@@ -11965,15 +11976,23 @@ static int __init thinkpad_acpi_module_init(void)
tp_features.quirks = dmi_id->driver_data;
/* Device initialization */
- tpacpi_pdev = platform_create_bundle(&tpacpi_pdriver, tpacpi_pdriver_probe,
- NULL, 0, NULL, 0);
+ tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, PLATFORM_DEVID_NONE,
+ NULL, 0);
if (IS_ERR(tpacpi_pdev)) {
ret = PTR_ERR(tpacpi_pdev);
tpacpi_pdev = NULL;
- pr_err("unable to register platform device/driver bundle\n");
+ pr_err("unable to register platform device\n");
+ thinkpad_acpi_module_exit();
+ return ret;
+ }
+
+ ret = platform_driver_probe(&tpacpi_pdriver, tpacpi_pdriver_probe);
+ if (ret) {
+ pr_err("unable to register main platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
+ tp_features.platform_drv_registered = 1;
tpacpi_sensors_pdev = platform_create_bundle(&tpacpi_hwmon_pdriver,
tpacpi_hwmon_pdriver_probe,
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index c43d8ad02529..d2ff76e74a05 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -843,6 +843,7 @@ EXPORT_SYMBOL(isapnp_protocol);
EXPORT_SYMBOL(isapnp_present);
EXPORT_SYMBOL(isapnp_cfg_begin);
EXPORT_SYMBOL(isapnp_cfg_end);
+EXPORT_SYMBOL(isapnp_read_byte);
EXPORT_SYMBOL(isapnp_write_byte);
static int isapnp_get_resources(struct pnp_dev *dev)
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 7d82bd1b36df..1e8142479656 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -270,8 +270,8 @@ static const unsigned int rk817_buck1_4_ramp_table[] = {
static int rk806_set_mode_dcdc(struct regulator_dev *rdev, unsigned int mode)
{
- int rid = rdev_get_id(rdev);
- int ctr_bit, reg;
+ unsigned int rid = rdev_get_id(rdev);
+ unsigned int ctr_bit, reg;
reg = RK806_POWER_FPWM_EN0 + rid / 8;
ctr_bit = rid % 8;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 0bbbf778ecfa..838bdc138ffe 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1321,13 +1321,6 @@ config RTC_DRV_SPEAR
If you say Y here you will get support for the RTC found on
spear
-config RTC_DRV_PCF50633
- depends on MFD_PCF50633
- tristate "NXP PCF50633 RTC"
- help
- If you say yes here you get support for the RTC subsystem of the
- NXP PCF50633 used in embedded systems.
-
config RTC_DRV_AB8500
tristate "ST-Ericsson AB8500 RTC"
depends on AB8500_CORE
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 489b4ab07068..31473b3276d9 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -126,7 +126,6 @@ obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o
obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o
-obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index d2b60487d462..de002f7a39bf 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -426,29 +426,9 @@ static umode_t abeoz9_is_visible(const void *data,
}
}
-static const u32 abeoz9_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info abeoz9_chip = {
- .type = hwmon_chip,
- .config = abeoz9_chip_config,
-};
-
-static const u32 abeoz9_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN,
- 0
-};
-
-static const struct hwmon_channel_info abeoz9_temp = {
- .type = hwmon_temp,
- .config = abeoz9_temp_config,
-};
-
static const struct hwmon_channel_info * const abeoz9_info[] = {
- &abeoz9_chip,
- &abeoz9_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN),
NULL
};
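
HWMON_CHANNEL_INFO(), adopted in the hunks above, builds the per-type channel descriptor and its config array in a single expression. A hedged sketch of how such a table typically plugs into the hwmon registration API; the driver names, callbacks and values below are made up.

#include <linux/hwmon.h>

static umode_t sample_is_visible(const void *data, enum hwmon_sensor_types type,
				 u32 attr, int channel)
{
	return 0444;
}

static int sample_read(struct device *dev, enum hwmon_sensor_types type,
		       u32 attr, int channel, long *val)
{
	*val = 25000;	/* 25.000 degrees C, placeholder value */
	return 0;
}

static const struct hwmon_ops sample_hwmon_ops = {
	.is_visible = sample_is_visible,
	.read = sample_read,
};

static const struct hwmon_channel_info * const sample_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};

static const struct hwmon_chip_info sample_chip_info = {
	.ops = &sample_hwmon_ops,
	.info = sample_info,
};

/* In probe: devm_hwmon_device_register_with_info(dev, "sample", drvdata,
 *					       &sample_chip_info, NULL); */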
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 2dcda96f4a8e..ed2b6b8bb3bf 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -361,7 +361,7 @@ static int ab8500_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
- device_init_wakeup(&pdev->dev, true);
+ devm_device_init_wakeup(&pdev->dev);
rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc))
@@ -375,7 +375,7 @@ static int ab8500_rtc_probe(struct platform_device *pdev)
if (err < 0)
return err;
- dev_pm_set_wake_irq(&pdev->dev, irq);
+ devm_pm_set_wake_irq(&pdev->dev, irq);
platform_set_drvdata(pdev, rtc);
set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
@@ -392,18 +392,11 @@ static int ab8500_rtc_probe(struct platform_device *pdev)
return devm_rtc_register_device(rtc);
}
-static void ab8500_rtc_remove(struct platform_device *pdev)
-{
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
-}
-
static struct platform_driver ab8500_rtc_driver = {
.driver = {
.name = "ab8500-rtc",
},
.probe = ab8500_rtc_probe,
- .remove = ab8500_rtc_remove,
.id_table = ab85xx_rtc_ids,
};
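
The conversion above moves to the devm-managed wakeup helpers so the driver no longer needs a .remove callback just for cleanup. A minimal sketch of the resulting probe shape, assuming a hypothetical platform driver; both helpers are undone automatically on driver detach.

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int sample_probe(struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Replaces device_init_wakeup(dev, true) plus manual disable on remove. */
	ret = devm_device_init_wakeup(&pdev->dev);
	if (ret)
		return ret;

	/* Replaces dev_pm_set_wake_irq() plus dev_pm_clear_wake_irq(). */
	return devm_pm_set_wake_irq(&pdev->dev, irq);
}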
diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
index 880b015eebaf..0d0053b52f9b 100644
--- a/drivers/rtc/rtc-aspeed.c
+++ b/drivers/rtc/rtc-aspeed.c
@@ -8,7 +8,6 @@
#include <linux/io.h>
struct aspeed_rtc {
- struct rtc_device *rtc_dev;
void __iomem *base;
};
@@ -85,6 +84,7 @@ static const struct rtc_class_ops aspeed_rtc_ops = {
static int aspeed_rtc_probe(struct platform_device *pdev)
{
struct aspeed_rtc *rtc;
+ struct rtc_device *rtc_dev;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
@@ -94,17 +94,17 @@ static int aspeed_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
- rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(rtc->rtc_dev))
- return PTR_ERR(rtc->rtc_dev);
+ rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc_dev))
+ return PTR_ERR(rtc_dev);
platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev->ops = &aspeed_rtc_ops;
- rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
- rtc->rtc_dev->range_max = 38814989399LL; /* 3199-12-31 23:59:59 */
+ rtc_dev->ops = &aspeed_rtc_ops;
+ rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ rtc_dev->range_max = 38814989399LL; /* 3199-12-31 23:59:59 */
- return devm_rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc_dev);
}
static const struct of_device_id aspeed_rtc_match[] = {
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 865c2e82c7a5..e956505a06fb 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -35,21 +35,18 @@ struct cros_ec_rtc {
static int cros_ec_rtc_get(struct cros_ec_device *cros_ec, u32 command,
u32 *response)
{
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(struct ec_response_rtc));
int ret;
- struct {
- struct cros_ec_command msg;
- struct ec_response_rtc data;
- } __packed msg;
- memset(&msg, 0, sizeof(msg));
- msg.msg.command = command;
- msg.msg.insize = sizeof(msg.data);
+ msg->command = command;
+ msg->insize = sizeof(struct ec_response_rtc);
- ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ ret = cros_ec_cmd_xfer_status(cros_ec, msg);
if (ret < 0)
return ret;
- *response = msg.data.time;
+ *response = ((struct ec_response_rtc *)msg->data)->time;
return 0;
}
@@ -57,18 +54,15 @@ static int cros_ec_rtc_get(struct cros_ec_device *cros_ec, u32 command,
static int cros_ec_rtc_set(struct cros_ec_device *cros_ec, u32 command,
u32 param)
{
+ DEFINE_RAW_FLEX(struct cros_ec_command, msg, data,
+ sizeof(struct ec_response_rtc));
int ret;
- struct {
- struct cros_ec_command msg;
- struct ec_response_rtc data;
- } __packed msg;
- memset(&msg, 0, sizeof(msg));
- msg.msg.command = command;
- msg.msg.outsize = sizeof(msg.data);
- msg.data.time = param;
+ msg->command = command;
+ msg->outsize = sizeof(struct ec_response_rtc);
+ ((struct ec_response_rtc *)msg->data)->time = param;
- ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ ret = cros_ec_cmd_xfer_status(cros_ec, msg);
if (ret < 0)
return ret;
return 0;
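
DEFINE_RAW_FLEX(), used in the hunks above, declares an on-stack, zero-initialized object of a flexible-array struct with room for a fixed number of trailing elements, replacing the hand-rolled wrapper struct. A hypothetical sketch (the struct, sizes and names are illustrative, not the cros_ec types):

#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/types.h>

struct sample_msg {
	u32 command;
	u32 outsize;
	u8 data[];		/* flexible array member */
};

static void sample_fill(u32 command, const void *payload, size_t len)
{
	/* msg points at a zeroed sample_msg with 16 bytes of data[] space;
	 * the element count used here is a compile-time constant. */
	DEFINE_RAW_FLEX(struct sample_msg, msg, data, 16);

	msg->command = command;
	msg->outsize = len;
	if (len <= 16)
		memcpy(msg->data, payload, len);

	/* A real caller would now hand msg to its transport function. */
}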
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 872e0b679be4..5efbe69bf5ca 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -1807,10 +1807,8 @@ static int ds1307_probe(struct i2c_client *client)
* For some variants, be sure alarms can trigger when we're
* running on Vbackup (BBSQI/BBSQW)
*/
- if (want_irq || ds1307_can_wakeup_device) {
+ if (want_irq || ds1307_can_wakeup_device)
regs[0] |= DS1337_BIT_INTCN | chip->bbsqi_bit;
- regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
- }
regmap_write(ds1307->regmap, DS1337_REG_CONTROL,
regs[0]);
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index ed5a6ba89a3e..aa9500791b7e 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -427,18 +427,13 @@ static int ds1343_probe(struct spi_device *spi)
"unable to request irq for rtc ds1343\n");
} else {
device_init_wakeup(&spi->dev, true);
- dev_pm_set_wake_irq(&spi->dev, spi->irq);
+ devm_pm_set_wake_irq(&spi->dev, spi->irq);
}
}
return 0;
}
-static void ds1343_remove(struct spi_device *spi)
-{
- dev_pm_clear_wake_irq(&spi->dev);
-}
-
#ifdef CONFIG_PM_SLEEP
static int ds1343_suspend(struct device *dev)
@@ -471,7 +466,6 @@ static struct spi_driver ds1343_driver = {
.pm = &ds1343_pm,
},
.probe = ds1343_probe,
- .remove = ds1343_remove,
.id_table = ds1343_id,
};
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 3231fd9f61da..217694eca36c 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -31,7 +31,6 @@ struct ds2404 {
struct gpio_desc *rst_gpiod;
struct gpio_desc *clk_gpiod;
struct gpio_desc *dq_gpiod;
- struct rtc_device *rtc;
};
static int ds2404_gpio_map(struct ds2404 *chip, struct platform_device *pdev)
@@ -182,6 +181,7 @@ static const struct rtc_class_ops ds2404_rtc_ops = {
static int rtc_probe(struct platform_device *pdev)
{
struct ds2404 *chip;
+ struct rtc_device *rtc;
int retval = -EBUSY;
chip = devm_kzalloc(&pdev->dev, sizeof(struct ds2404), GFP_KERNEL);
@@ -190,9 +190,9 @@ static int rtc_probe(struct platform_device *pdev)
chip->dev = &pdev->dev;
- chip->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(chip->rtc))
- return PTR_ERR(chip->rtc);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
retval = ds2404_gpio_map(chip, pdev);
if (retval)
@@ -200,10 +200,10 @@ static int rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
- chip->rtc->ops = &ds2404_rtc_ops;
- chip->rtc->range_max = U32_MAX;
+ rtc->ops = &ds2404_rtc_ops;
+ rtc->range_max = U32_MAX;
- retval = devm_rtc_register_device(chip->rtc);
+ retval = devm_rtc_register_device(rtc);
if (retval)
return retval;
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 19c09c418746..18f35823b4b5 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -339,29 +339,9 @@ static int ds3232_hwmon_read(struct device *dev,
return err;
}
-static u32 ds3232_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info ds3232_hwmon_chip = {
- .type = hwmon_chip,
- .config = ds3232_hwmon_chip_config,
-};
-
-static u32 ds3232_hwmon_temp_config[] = {
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ds3232_hwmon_temp = {
- .type = hwmon_temp,
- .config = ds3232_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const ds3232_hwmon_info[] = {
- &ds3232_hwmon_chip,
- &ds3232_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
NULL
};
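
HWMON_CHANNEL_INFO() from <linux/hwmon.h> generates the static hwmon_channel_info plus its config array in a single expression, which is what lets the two open-coded structures above be dropped. On its own, the replacement table looks like this (the demo_ naming is illustrative only):

#include <linux/hwmon.h>

/*
 * Equivalent of the deleted open-coded arrays: one thermal-zone-capable
 * chip node plus a single temperature input channel.
 */
static const struct hwmon_channel_info * const demo_hwmon_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
	NULL
};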
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 1fdd20d01560..dcdcdd06f30d 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -28,7 +28,6 @@
struct ep93xx_rtc {
void __iomem *mmio_base;
- struct rtc_device *rtc;
};
static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -123,6 +122,7 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
static int ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
+ struct rtc_device *rtc;
int err;
ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -135,18 +135,18 @@ static int ep93xx_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ep93xx_rtc);
- ep93xx_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(ep93xx_rtc->rtc))
- return PTR_ERR(ep93xx_rtc->rtc);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- ep93xx_rtc->rtc->ops = &ep93xx_rtc_ops;
- ep93xx_rtc->rtc->range_max = U32_MAX;
+ rtc->ops = &ep93xx_rtc_ops;
+ rtc->range_max = U32_MAX;
- err = rtc_add_group(ep93xx_rtc->rtc, &ep93xx_rtc_sysfs_files);
+ err = rtc_add_group(rtc, &ep93xx_rtc_sysfs_files);
if (err)
return err;
- return devm_rtc_register_device(ep93xx_rtc->rtc);
+ return devm_rtc_register_device(rtc);
}
static const struct of_device_id ep93xx_rtc_of_ids[] = {
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index a72c4ad0cec6..c8015f04c71f 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -309,7 +309,7 @@ static const struct of_device_id ftm_rtc_match[] = {
};
MODULE_DEVICE_TABLE(of, ftm_rtc_match);
-static const struct acpi_device_id ftm_imx_acpi_ids[] = {
+static const struct acpi_device_id ftm_imx_acpi_ids[] __maybe_unused = {
{"NXP0014",},
{ }
};
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index cb4a5d101f53..02608d378495 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -28,7 +28,6 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
struct ftrtc010_rtc {
- struct rtc_device *rtc_dev;
void __iomem *rtc_base;
int rtc_irq;
struct clk *pclk;
@@ -113,6 +112,7 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
struct ftrtc010_rtc *rtc;
struct device *dev = &pdev->dev;
struct resource *res;
+ struct rtc_device *rtc_dev;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -160,29 +160,28 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
goto err_disable_extclk;
}
- rtc->rtc_dev = devm_rtc_allocate_device(dev);
- if (IS_ERR(rtc->rtc_dev)) {
- ret = PTR_ERR(rtc->rtc_dev);
+ rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc_dev)) {
+ ret = PTR_ERR(rtc_dev);
goto err_disable_extclk;
}
- rtc->rtc_dev->ops = &ftrtc010_rtc_ops;
+ rtc_dev->ops = &ftrtc010_rtc_ops;
sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND);
min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE);
hour = readl(rtc->rtc_base + FTRTC010_RTC_HOUR);
days = readl(rtc->rtc_base + FTRTC010_RTC_DAYS);
- rtc->rtc_dev->range_min = (u64)days * 86400 + hour * 3600 +
- min * 60 + sec;
- rtc->rtc_dev->range_max = U32_MAX + rtc->rtc_dev->range_min;
+ rtc_dev->range_min = (u64)days * 86400 + hour * 3600 + min * 60 + sec;
+ rtc_dev->range_max = U32_MAX + rtc_dev->range_min;
ret = devm_request_irq(dev, rtc->rtc_irq, ftrtc010_rtc_interrupt,
IRQF_SHARED, pdev->name, dev);
if (unlikely(ret))
goto err_disable_extclk;
- return devm_rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc_dev);
err_disable_extclk:
clk_disable_unprepare(rtc->extclk);
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index dd4a62e2d39c..10cd054fe86f 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -41,7 +41,6 @@
struct m48t86_rtc_info {
void __iomem *index_reg;
void __iomem *data_reg;
- struct rtc_device *rtc;
};
static unsigned char m48t86_readb(struct device *dev, unsigned long addr)
@@ -219,6 +218,7 @@ static bool m48t86_verify_chip(struct platform_device *pdev)
static int m48t86_rtc_probe(struct platform_device *pdev)
{
struct m48t86_rtc_info *info;
+ struct rtc_device *rtc;
unsigned char reg;
int err;
struct nvmem_config m48t86_nvmem_cfg = {
@@ -250,17 +250,17 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
- info->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(info->rtc))
- return PTR_ERR(info->rtc);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- info->rtc->ops = &m48t86_rtc_ops;
+ rtc->ops = &m48t86_rtc_ops;
- err = devm_rtc_register_device(info->rtc);
+ err = devm_rtc_register_device(rtc);
if (err)
return err;
- devm_rtc_nvmem_register(info->rtc, &m48t86_nvmem_cfg);
+ devm_rtc_nvmem_register(rtc, &m48t86_nvmem_cfg);
/* read battery status */
reg = m48t86_readb(&pdev->dev, M48T86_D);
diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c
index 3fbcf5f6b92f..a7bb37aaab9e 100644
--- a/drivers/rtc/rtc-max31335.c
+++ b/drivers/rtc/rtc-max31335.c
@@ -184,31 +184,91 @@
#define MAX31335_RAM_SIZE 32
#define MAX31335_TIME_SIZE 0x07
+/* MAX31331 Register Map */
+#define MAX31331_RTC_CONFIG2 0x04
+
#define clk_hw_to_max31335(_hw) container_of(_hw, struct max31335_data, clkout)
+/* Supported Maxim RTC */
+enum max_rtc_ids {
+ ID_MAX31331,
+ ID_MAX31335,
+ MAX_RTC_ID_NR
+};
+
+struct chip_desc {
+ u8 sec_reg;
+ u8 alarm1_sec_reg;
+
+ u8 int_en_reg;
+ u8 int_status_reg;
+
+ u8 ram_reg;
+ u8 ram_size;
+
+ u8 temp_reg;
+
+ u8 trickle_reg;
+
+ u8 clkout_reg;
+
+ enum max_rtc_ids id;
+};
+
struct max31335_data {
struct regmap *regmap;
struct rtc_device *rtc;
struct clk_hw clkout;
+ struct clk *clkin;
+ const struct chip_desc *chip;
+ int irq;
};
static const int max31335_clkout_freq[] = { 1, 64, 1024, 32768 };
+static const struct chip_desc chip[MAX_RTC_ID_NR] = {
+ [ID_MAX31331] = {
+ .id = ID_MAX31331,
+ .int_en_reg = 0x01,
+ .int_status_reg = 0x00,
+ .sec_reg = 0x08,
+ .alarm1_sec_reg = 0x0F,
+ .ram_reg = 0x20,
+ .ram_size = 32,
+ .trickle_reg = 0x1B,
+ .clkout_reg = 0x04,
+ },
+ [ID_MAX31335] = {
+ .id = ID_MAX31335,
+ .int_en_reg = 0x01,
+ .int_status_reg = 0x00,
+ .sec_reg = 0x0A,
+ .alarm1_sec_reg = 0x11,
+ .ram_reg = 0x40,
+ .ram_size = 32,
+ .temp_reg = 0x35,
+ .trickle_reg = 0x1D,
+ .clkout_reg = 0x06,
+ },
+};
+
static const u16 max31335_trickle_resistors[] = {3000, 6000, 11000};
static bool max31335_volatile_reg(struct device *dev, unsigned int reg)
{
+ struct max31335_data *max31335 = dev_get_drvdata(dev);
+ const struct chip_desc *chip = max31335->chip;
+
/* time keeping registers */
- if (reg >= MAX31335_SECONDS &&
- reg < MAX31335_SECONDS + MAX31335_TIME_SIZE)
+ if (reg >= chip->sec_reg && reg < chip->sec_reg + MAX31335_TIME_SIZE)
return true;
/* interrupt status register */
- if (reg == MAX31335_STATUS1)
+ if (reg == chip->int_status_reg)
return true;
- /* temperature registers */
- if (reg == MAX31335_TEMP_DATA_MSB || reg == MAX31335_TEMP_DATA_LSB)
+ /* temperature registers if valid */
+ if (chip->temp_reg && (reg == chip->temp_reg || reg == chip->temp_reg + 1))
return true;
return false;
@@ -227,7 +287,7 @@ static int max31335_read_time(struct device *dev, struct rtc_time *tm)
u8 date[7];
int ret;
- ret = regmap_bulk_read(max31335->regmap, MAX31335_SECONDS, date,
+ ret = regmap_bulk_read(max31335->regmap, max31335->chip->sec_reg, date,
sizeof(date));
if (ret)
return ret;
@@ -262,7 +322,7 @@ static int max31335_set_time(struct device *dev, struct rtc_time *tm)
if (tm->tm_year >= 200)
date[5] |= FIELD_PREP(MAX31335_MONTH_CENTURY, 1);
- return regmap_bulk_write(max31335->regmap, MAX31335_SECONDS, date,
+ return regmap_bulk_write(max31335->regmap, max31335->chip->sec_reg, date,
sizeof(date));
}
@@ -273,7 +333,7 @@ static int max31335_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time time;
u8 regs[6];
- ret = regmap_bulk_read(max31335->regmap, MAX31335_ALM1_SEC, regs,
+ ret = regmap_bulk_read(max31335->regmap, max31335->chip->alarm1_sec_reg, regs,
sizeof(regs));
if (ret)
return ret;
@@ -292,11 +352,11 @@ static int max31335_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (time.tm_year >= 200)
alrm->time.tm_year += 100;
- ret = regmap_read(max31335->regmap, MAX31335_INT_EN1, &ctrl);
+ ret = regmap_read(max31335->regmap, max31335->chip->int_en_reg, &ctrl);
if (ret)
return ret;
- ret = regmap_read(max31335->regmap, MAX31335_STATUS1, &status);
+ ret = regmap_read(max31335->regmap, max31335->chip->int_status_reg, &status);
if (ret)
return ret;
@@ -320,18 +380,18 @@ static int max31335_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
regs[4] = bin2bcd(alrm->time.tm_mon + 1);
regs[5] = bin2bcd(alrm->time.tm_year % 100);
- ret = regmap_bulk_write(max31335->regmap, MAX31335_ALM1_SEC,
+ ret = regmap_bulk_write(max31335->regmap, max31335->chip->alarm1_sec_reg,
regs, sizeof(regs));
if (ret)
return ret;
reg = FIELD_PREP(MAX31335_INT_EN1_A1IE, alrm->enabled);
- ret = regmap_update_bits(max31335->regmap, MAX31335_INT_EN1,
+ ret = regmap_update_bits(max31335->regmap, max31335->chip->int_en_reg,
MAX31335_INT_EN1_A1IE, reg);
if (ret)
return ret;
- ret = regmap_update_bits(max31335->regmap, MAX31335_STATUS1,
+ ret = regmap_update_bits(max31335->regmap, max31335->chip->int_status_reg,
MAX31335_STATUS1_A1F, 0);
return 0;
@@ -341,23 +401,33 @@ static int max31335_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct max31335_data *max31335 = dev_get_drvdata(dev);
- return regmap_update_bits(max31335->regmap, MAX31335_INT_EN1,
+ return regmap_update_bits(max31335->regmap, max31335->chip->int_en_reg,
MAX31335_INT_EN1_A1IE, enabled);
}
static irqreturn_t max31335_handle_irq(int irq, void *dev_id)
{
struct max31335_data *max31335 = dev_id;
- bool status;
- int ret;
+ struct mutex *lock = &max31335->rtc->ops_lock;
+ int ret, status;
- ret = regmap_update_bits_check(max31335->regmap, MAX31335_STATUS1,
- MAX31335_STATUS1_A1F, 0, &status);
+ mutex_lock(lock);
+
+ ret = regmap_read(max31335->regmap, max31335->chip->int_status_reg, &status);
if (ret)
- return IRQ_HANDLED;
+ goto exit;
+
+ if (FIELD_GET(MAX31335_STATUS1_A1F, status)) {
+ ret = regmap_update_bits(max31335->regmap, max31335->chip->int_status_reg,
+ MAX31335_STATUS1_A1F, 0);
+ if (ret)
+ goto exit;
- if (status)
rtc_update_irq(max31335->rtc, 1, RTC_AF | RTC_IRQF);
+ }
+
+exit:
+ mutex_unlock(lock);
return IRQ_HANDLED;
}
@@ -404,7 +474,7 @@ static int max31335_trickle_charger_setup(struct device *dev,
i = i + trickle_cfg;
- return regmap_write(max31335->regmap, MAX31335_TRICKLE_REG,
+ return regmap_write(max31335->regmap, max31335->chip->trickle_reg,
FIELD_PREP(MAX31335_TRICKLE_REG_TRICKLE, i) |
FIELD_PREP(MAX31335_TRICKLE_REG_EN_TRICKLE,
chargeable));
@@ -418,7 +488,7 @@ static unsigned long max31335_clkout_recalc_rate(struct clk_hw *hw,
unsigned int reg;
int ret;
- ret = regmap_read(max31335->regmap, MAX31335_RTC_CONFIG2, &reg);
+ ret = regmap_read(max31335->regmap, max31335->chip->clkout_reg, &reg);
if (ret)
return 0;
@@ -449,23 +519,23 @@ static int max31335_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
ARRAY_SIZE(max31335_clkout_freq));
freq_mask = __roundup_pow_of_two(ARRAY_SIZE(max31335_clkout_freq)) - 1;
- return regmap_update_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
- freq_mask, index);
+ return regmap_update_bits(max31335->regmap, max31335->chip->clkout_reg,
+ freq_mask, index);
}
static int max31335_clkout_enable(struct clk_hw *hw)
{
struct max31335_data *max31335 = clk_hw_to_max31335(hw);
- return regmap_set_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
- MAX31335_RTC_CONFIG2_ENCLKO);
+ return regmap_set_bits(max31335->regmap, max31335->chip->clkout_reg,
+ MAX31335_RTC_CONFIG2_ENCLKO);
}
static void max31335_clkout_disable(struct clk_hw *hw)
{
struct max31335_data *max31335 = clk_hw_to_max31335(hw);
- regmap_clear_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
+ regmap_clear_bits(max31335->regmap, max31335->chip->clkout_reg,
MAX31335_RTC_CONFIG2_ENCLKO);
}
@@ -475,7 +545,7 @@ static int max31335_clkout_is_enabled(struct clk_hw *hw)
unsigned int reg;
int ret;
- ret = regmap_read(max31335->regmap, MAX31335_RTC_CONFIG2, &reg);
+ ret = regmap_read(max31335->regmap, max31335->chip->clkout_reg, &reg);
if (ret)
return ret;
@@ -500,7 +570,7 @@ static int max31335_nvmem_reg_read(void *priv, unsigned int offset,
void *val, size_t bytes)
{
struct max31335_data *max31335 = priv;
- unsigned int reg = MAX31335_TS0_SEC_1_128 + offset;
+ unsigned int reg = max31335->chip->ram_reg + offset;
return regmap_bulk_read(max31335->regmap, reg, val, bytes);
}
@@ -509,7 +579,7 @@ static int max31335_nvmem_reg_write(void *priv, unsigned int offset,
void *val, size_t bytes)
{
struct max31335_data *max31335 = priv;
- unsigned int reg = MAX31335_TS0_SEC_1_128 + offset;
+ unsigned int reg = max31335->chip->ram_reg + offset;
return regmap_bulk_write(max31335->regmap, reg, val, bytes);
}
@@ -533,7 +603,7 @@ static int max31335_read_temp(struct device *dev, enum hwmon_sensor_types type,
if (type != hwmon_temp || attr != hwmon_temp_input)
return -EOPNOTSUPP;
- ret = regmap_bulk_read(max31335->regmap, MAX31335_TEMP_DATA_MSB,
+ ret = regmap_bulk_read(max31335->regmap, max31335->chip->temp_reg,
reg, 2);
if (ret)
return ret;
@@ -577,8 +647,8 @@ static int max31335_clkout_register(struct device *dev)
int ret;
if (!device_property_present(dev, "#clock-cells"))
- return regmap_clear_bits(max31335->regmap, MAX31335_RTC_CONFIG2,
- MAX31335_RTC_CONFIG2_ENCLKO);
+ return regmap_clear_bits(max31335->regmap, max31335->chip->clkout_reg,
+ MAX31335_RTC_CONFIG2_ENCLKO);
max31335->clkout.init = &max31335_clk_init;
@@ -605,6 +675,7 @@ static int max31335_probe(struct i2c_client *client)
#if IS_REACHABLE(HWMON)
struct device *hwmon;
#endif
+ const struct chip_desc *match;
int ret;
max31335 = devm_kzalloc(&client->dev, sizeof(*max31335), GFP_KERNEL);
@@ -616,7 +687,10 @@ static int max31335_probe(struct i2c_client *client)
return PTR_ERR(max31335->regmap);
i2c_set_clientdata(client, max31335);
-
+ match = i2c_get_match_data(client);
+ if (!match)
+ return -ENODEV;
+ max31335->chip = match;
max31335->rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(max31335->rtc))
return PTR_ERR(max31335->rtc);
@@ -639,6 +713,8 @@ static int max31335_probe(struct i2c_client *client)
dev_warn(&client->dev,
"unable to request IRQ, alarm max31335 disabled\n");
client->irq = 0;
+ } else {
+ max31335->irq = client->irq;
}
}
@@ -652,13 +728,13 @@ static int max31335_probe(struct i2c_client *client)
"cannot register rtc nvmem\n");
#if IS_REACHABLE(HWMON)
- hwmon = devm_hwmon_device_register_with_info(&client->dev, client->name,
- max31335,
- &max31335_chip_info,
- NULL);
- if (IS_ERR(hwmon))
- return dev_err_probe(&client->dev, PTR_ERR(hwmon),
- "cannot register hwmon device\n");
+ if (max31335->chip->temp_reg) {
+ hwmon = devm_hwmon_device_register_with_info(&client->dev, client->name, max31335,
+ &max31335_chip_info, NULL);
+ if (IS_ERR(hwmon))
+ return dev_err_probe(&client->dev, PTR_ERR(hwmon),
+ "cannot register hwmon device\n");
+ }
#endif
ret = max31335_trickle_charger_setup(&client->dev, max31335);
@@ -669,14 +745,16 @@ static int max31335_probe(struct i2c_client *client)
}
static const struct i2c_device_id max31335_id[] = {
- { "max31335" },
+ { "max31331", (kernel_ulong_t)&chip[ID_MAX31331] },
+ { "max31335", (kernel_ulong_t)&chip[ID_MAX31335] },
{ }
};
MODULE_DEVICE_TABLE(i2c, max31335_id);
static const struct of_device_id max31335_of_match[] = {
- { .compatible = "adi,max31335" },
+ { .compatible = "adi,max31331", .data = &chip[ID_MAX31331] },
+ { .compatible = "adi,max31335", .data = &chip[ID_MAX31335] },
{ }
};
@@ -693,5 +771,6 @@ static struct i2c_driver max31335_driver = {
module_i2c_driver(max31335_driver);
MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_AUTHOR("Saket Kumar Purwar <Saket.Kumarpurwar@analog.com>");
MODULE_DESCRIPTION("MAX31335 RTC driver");
MODULE_LICENSE("GPL");
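
The MAX31331 support added above rests on per-variant register descriptors hung off both match tables and fetched with i2c_get_match_data(), which handles OF .data and i2c_device_id .driver_data matches alike. Reduced to the bare mechanism, with demo_* names and register values that are not taken from the driver:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

struct demo_chip_desc {
	u8 sec_reg;			/* first timekeeping register */
};

static const struct demo_chip_desc demo_chips[] = {
	{ .sec_reg = 0x08 },		/* variant A */
	{ .sec_reg = 0x0a },		/* variant B */
};

static const struct i2c_device_id demo_ids[] = {
	{ "demo-a", (kernel_ulong_t)&demo_chips[0] },
	{ "demo-b", (kernel_ulong_t)&demo_chips[1] },
	{ }
};

static int demo_probe(struct i2c_client *client)
{
	const struct demo_chip_desc *chip;

	/* Covers both firmware-node and i2c_device_id based matches. */
	chip = i2c_get_match_data(client);
	if (!chip)
		return -ENODEV;

	dev_info(&client->dev, "seconds register at 0x%02x\n", chip->sec_reg);
	return 0;
}

static struct i2c_driver demo_driver = {
	.driver = { .name = "demo-rtc" },
	.probe = demo_probe,
	.id_table = demo_ids,
};
module_i2c_driver(demo_driver);

MODULE_LICENSE("GPL");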
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 7bb044d2ac25..69ea3ce75b5a 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -85,7 +85,6 @@ struct max77686_rtc_driver_data {
struct max77686_rtc_info {
struct device *dev;
- struct i2c_client *rtc;
struct rtc_device *rtc_dev;
struct mutex lock;
@@ -691,6 +690,7 @@ static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
{
struct device *parent = info->dev->parent;
struct i2c_client *parent_i2c = to_i2c_client(parent);
+ struct i2c_client *client;
int ret;
if (info->drv_data->rtc_irq_from_platform) {
@@ -704,40 +704,35 @@ static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
}
info->regmap = dev_get_regmap(parent, NULL);
- if (!info->regmap) {
- dev_err(info->dev, "Failed to get rtc regmap\n");
- return -ENODEV;
- }
+ if (!info->regmap)
+ return dev_err_probe(info->dev, -ENODEV,
+ "Failed to get rtc regmap\n");
if (info->drv_data->rtc_i2c_addr == MAX77686_INVALID_I2C_ADDR) {
info->rtc_regmap = info->regmap;
goto add_rtc_irq;
}
- info->rtc = devm_i2c_new_dummy_device(info->dev, parent_i2c->adapter,
- info->drv_data->rtc_i2c_addr);
- if (IS_ERR(info->rtc)) {
- dev_err(info->dev, "Failed to allocate I2C device for RTC\n");
- return PTR_ERR(info->rtc);
- }
+ client = devm_i2c_new_dummy_device(info->dev, parent_i2c->adapter,
+ info->drv_data->rtc_i2c_addr);
+ if (IS_ERR(client))
+ return dev_err_probe(info->dev, PTR_ERR(client),
+ "Failed to allocate I2C device for RTC\n");
- info->rtc_regmap = devm_regmap_init_i2c(info->rtc,
+ info->rtc_regmap = devm_regmap_init_i2c(client,
info->drv_data->regmap_config);
- if (IS_ERR(info->rtc_regmap)) {
- ret = PTR_ERR(info->rtc_regmap);
- dev_err(info->dev, "Failed to allocate RTC regmap: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(info->rtc_regmap))
+ return dev_err_probe(info->dev, PTR_ERR(info->rtc_regmap),
+ "Failed to allocate RTC regmap\n");
add_rtc_irq:
ret = regmap_add_irq_chip(info->rtc_regmap, info->rtc_irq,
IRQF_ONESHOT | IRQF_SHARED,
0, info->drv_data->rtc_irq_chip,
&info->rtc_irq_data);
- if (ret < 0) {
- dev_err(info->dev, "Failed to add RTC irq chip: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(info->dev, ret,
+ "Failed to add RTC irq chip\n");
return 0;
}
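
dev_err_probe() is what lets each of those error legs collapse to a single return: it prints the message (or silently records it as a deferral reason when the error is -EPROBE_DEFER) and hands the error code straight back. The shape, on a made-up regmap lookup rather than the max77686 code:

#include <linux/device.h>
#include <linux/regmap.h>

static int demo_get_parent_regmap(struct device *dev, struct regmap **out)
{
	struct regmap *map = dev_get_regmap(dev->parent, NULL);

	if (!map)
		/*
		 * Logs and returns -ENODEV in one statement; with
		 * -EPROBE_DEFER it would record the reason instead.
		 */
		return dev_err_probe(dev, -ENODEV,
				     "Failed to get rtc regmap\n");

	*out = map;
	return 0;
}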
diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
index 5849729f7d01..7d38258cbe37 100644
--- a/drivers/rtc/rtc-meson-vrtc.c
+++ b/drivers/rtc/rtc-meson-vrtc.c
@@ -13,7 +13,6 @@
struct meson_vrtc_data {
void __iomem *io_alarm;
- struct rtc_device *rtc;
unsigned long alarm_time;
bool enabled;
};
@@ -65,6 +64,7 @@ static const struct rtc_class_ops meson_vrtc_ops = {
static int meson_vrtc_probe(struct platform_device *pdev)
{
struct meson_vrtc_data *vrtc;
+ struct rtc_device *rtc;
vrtc = devm_kzalloc(&pdev->dev, sizeof(*vrtc), GFP_KERNEL);
if (!vrtc)
@@ -78,12 +78,12 @@ static int meson_vrtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vrtc);
- vrtc->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(vrtc->rtc))
- return PTR_ERR(vrtc->rtc);
+ rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- vrtc->rtc->ops = &meson_vrtc_ops;
- return devm_rtc_register_device(vrtc->rtc);
+ rtc->ops = &meson_vrtc_ops;
+ return devm_rtc_register_device(rtc);
}
static int __maybe_unused meson_vrtc_suspend(struct device *dev)
diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c
index db1d626edca5..47e9ebf58ffc 100644
--- a/drivers/rtc/rtc-meson.c
+++ b/drivers/rtc/rtc-meson.c
@@ -59,7 +59,6 @@
#define MESON_STATIC_DEFAULT (MESON_STATIC_BIAS_CUR | MESON_STATIC_VOLTAGE)
struct meson_rtc {
- struct rtc_device *rtc; /* rtc device we created */
struct device *dev; /* device we bound from */
struct reset_control *reset; /* reset source */
struct regulator *vdd; /* voltage input */
@@ -292,6 +291,7 @@ static int meson_rtc_probe(struct platform_device *pdev)
};
struct device *dev = &pdev->dev;
struct meson_rtc *rtc;
+ struct rtc_device *rtc_dev;
void __iomem *base;
int ret;
u32 tm;
@@ -300,16 +300,16 @@ static int meson_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- rtc->rtc = devm_rtc_allocate_device(dev);
- if (IS_ERR(rtc->rtc))
- return PTR_ERR(rtc->rtc);
+ rtc_dev = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc_dev))
+ return PTR_ERR(rtc_dev);
platform_set_drvdata(pdev, rtc);
rtc->dev = dev;
- rtc->rtc->ops = &meson_rtc_ops;
- rtc->rtc->range_max = U32_MAX;
+ rtc_dev->ops = &meson_rtc_ops;
+ rtc_dev->range_max = U32_MAX;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -365,11 +365,11 @@ static int meson_rtc_probe(struct platform_device *pdev)
}
meson_rtc_nvmem_config.priv = rtc;
- ret = devm_rtc_nvmem_register(rtc->rtc, &meson_rtc_nvmem_config);
+ ret = devm_rtc_nvmem_register(rtc_dev, &meson_rtc_nvmem_config);
if (ret)
goto out_disable_vdd;
- ret = devm_rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc_dev);
if (ret)
goto out_disable_vdd;
diff --git a/drivers/rtc/rtc-mpfs.c b/drivers/rtc/rtc-mpfs.c
index 3892b0f9917f..6aa3eae575d2 100644
--- a/drivers/rtc/rtc-mpfs.c
+++ b/drivers/rtc/rtc-mpfs.c
@@ -266,19 +266,14 @@ static int mpfs_rtc_probe(struct platform_device *pdev)
writel(prescaler, rtcdev->base + PRESCALER_REG);
dev_info(&pdev->dev, "prescaler set to: %lu\n", prescaler);
- device_init_wakeup(&pdev->dev, true);
- ret = dev_pm_set_wake_irq(&pdev->dev, wakeup_irq);
+ devm_device_init_wakeup(&pdev->dev);
+ ret = devm_pm_set_wake_irq(&pdev->dev, wakeup_irq);
if (ret)
dev_err(&pdev->dev, "failed to enable irq wake\n");
return devm_rtc_register_device(rtcdev->rtc);
}
-static void mpfs_rtc_remove(struct platform_device *pdev)
-{
- dev_pm_clear_wake_irq(&pdev->dev);
-}
-
static const struct of_device_id mpfs_rtc_of_match[] = {
{ .compatible = "microchip,mpfs-rtc" },
{ }
@@ -288,7 +283,6 @@ MODULE_DEVICE_TABLE(of, mpfs_rtc_of_match);
static struct platform_driver mpfs_rtc_driver = {
.probe = mpfs_rtc_probe,
- .remove = mpfs_rtc_remove,
.driver = {
.name = "mpfs_rtc",
.of_match_table = mpfs_rtc_of_match,
diff --git a/drivers/rtc/rtc-nxp-bbnsm.c b/drivers/rtc/rtc-nxp-bbnsm.c
index fa3b0328c7a2..d4fc9dc583d3 100644
--- a/drivers/rtc/rtc-nxp-bbnsm.c
+++ b/drivers/rtc/rtc-nxp-bbnsm.c
@@ -189,36 +189,26 @@ static int bbnsm_rtc_probe(struct platform_device *pdev)
/* clear all the pending events */
regmap_write(bbnsm->regmap, BBNSM_EVENTS, 0x7A);
- device_init_wakeup(&pdev->dev, true);
- dev_pm_set_wake_irq(&pdev->dev, bbnsm->irq);
+ ret = devm_device_init_wakeup(&pdev->dev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to init wakeup, %d\n", ret);
+
+ ret = devm_pm_set_wake_irq(&pdev->dev, bbnsm->irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to set wake irq, %d\n", ret);
ret = devm_request_irq(&pdev->dev, bbnsm->irq, bbnsm_rtc_irq_handler,
IRQF_SHARED, "rtc alarm", &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to request irq %d: %d\n",
bbnsm->irq, ret);
- goto err;
+ return ret;
}
bbnsm->rtc->ops = &bbnsm_rtc_ops;
bbnsm->rtc->range_max = U32_MAX;
- ret = devm_rtc_register_device(bbnsm->rtc);
- if (ret)
- goto err;
-
- return 0;
-
-err:
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
- return ret;
-}
-
-static void bbnsm_rtc_remove(struct platform_device *pdev)
-{
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
+ return devm_rtc_register_device(bbnsm->rtc);
}
static const struct of_device_id bbnsm_dt_ids[] = {
@@ -233,7 +223,6 @@ static struct platform_driver bbnsm_rtc_driver = {
.of_match_table = bbnsm_dt_ids,
},
.probe = bbnsm_rtc_probe,
- .remove = bbnsm_rtc_remove,
};
module_platform_driver(bbnsm_rtc_driver);
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
deleted file mode 100644
index c019c4d91c7d..000000000000
--- a/drivers/rtc/rtc-pcf50633.c
+++ /dev/null
@@ -1,284 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 RTC Driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- *
- * Broken down from monstrous PCF50633 driver mainly by
- * Harald Welte, Andy Green and Werner Almesberger
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/rtc.h>
-#include <linux/bcd.h>
-#include <linux/err.h>
-
-#include <linux/mfd/pcf50633/core.h>
-
-#define PCF50633_REG_RTCSC 0x59 /* Second */
-#define PCF50633_REG_RTCMN 0x5a /* Minute */
-#define PCF50633_REG_RTCHR 0x5b /* Hour */
-#define PCF50633_REG_RTCWD 0x5c /* Weekday */
-#define PCF50633_REG_RTCDT 0x5d /* Day */
-#define PCF50633_REG_RTCMT 0x5e /* Month */
-#define PCF50633_REG_RTCYR 0x5f /* Year */
-#define PCF50633_REG_RTCSCA 0x60 /* Alarm Second */
-#define PCF50633_REG_RTCMNA 0x61 /* Alarm Minute */
-#define PCF50633_REG_RTCHRA 0x62 /* Alarm Hour */
-#define PCF50633_REG_RTCWDA 0x63 /* Alarm Weekday */
-#define PCF50633_REG_RTCDTA 0x64 /* Alarm Day */
-#define PCF50633_REG_RTCMTA 0x65 /* Alarm Month */
-#define PCF50633_REG_RTCYRA 0x66 /* Alarm Year */
-
-enum pcf50633_time_indexes {
- PCF50633_TI_SEC,
- PCF50633_TI_MIN,
- PCF50633_TI_HOUR,
- PCF50633_TI_WKDAY,
- PCF50633_TI_DAY,
- PCF50633_TI_MONTH,
- PCF50633_TI_YEAR,
- PCF50633_TI_EXTENT /* always last */
-};
-
-struct pcf50633_time {
- u_int8_t time[PCF50633_TI_EXTENT];
-};
-
-struct pcf50633_rtc {
- int alarm_enabled;
- int alarm_pending;
-
- struct pcf50633 *pcf;
- struct rtc_device *rtc_dev;
-};
-
-static void pcf2rtc_time(struct rtc_time *rtc, struct pcf50633_time *pcf)
-{
- rtc->tm_sec = bcd2bin(pcf->time[PCF50633_TI_SEC]);
- rtc->tm_min = bcd2bin(pcf->time[PCF50633_TI_MIN]);
- rtc->tm_hour = bcd2bin(pcf->time[PCF50633_TI_HOUR]);
- rtc->tm_wday = bcd2bin(pcf->time[PCF50633_TI_WKDAY]);
- rtc->tm_mday = bcd2bin(pcf->time[PCF50633_TI_DAY]);
- rtc->tm_mon = bcd2bin(pcf->time[PCF50633_TI_MONTH]) - 1;
- rtc->tm_year = bcd2bin(pcf->time[PCF50633_TI_YEAR]) + 100;
-}
-
-static void rtc2pcf_time(struct pcf50633_time *pcf, struct rtc_time *rtc)
-{
- pcf->time[PCF50633_TI_SEC] = bin2bcd(rtc->tm_sec);
- pcf->time[PCF50633_TI_MIN] = bin2bcd(rtc->tm_min);
- pcf->time[PCF50633_TI_HOUR] = bin2bcd(rtc->tm_hour);
- pcf->time[PCF50633_TI_WKDAY] = bin2bcd(rtc->tm_wday);
- pcf->time[PCF50633_TI_DAY] = bin2bcd(rtc->tm_mday);
- pcf->time[PCF50633_TI_MONTH] = bin2bcd(rtc->tm_mon + 1);
- pcf->time[PCF50633_TI_YEAR] = bin2bcd(rtc->tm_year % 100);
-}
-
-static int
-pcf50633_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct pcf50633_rtc *rtc = dev_get_drvdata(dev);
- int err;
-
- if (enabled)
- err = pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
- else
- err = pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
-
- if (err < 0)
- return err;
-
- rtc->alarm_enabled = enabled;
-
- return 0;
-}
-
-static int pcf50633_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- struct pcf50633_rtc *rtc;
- struct pcf50633_time pcf_tm;
- int ret;
-
- rtc = dev_get_drvdata(dev);
-
- ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSC,
- PCF50633_TI_EXTENT,
- &pcf_tm.time[0]);
- if (ret != PCF50633_TI_EXTENT) {
- dev_err(dev, "Failed to read time\n");
- return -EIO;
- }
-
- dev_dbg(dev, "PCF_TIME: %02x.%02x.%02x %02x:%02x:%02x\n",
- pcf_tm.time[PCF50633_TI_DAY],
- pcf_tm.time[PCF50633_TI_MONTH],
- pcf_tm.time[PCF50633_TI_YEAR],
- pcf_tm.time[PCF50633_TI_HOUR],
- pcf_tm.time[PCF50633_TI_MIN],
- pcf_tm.time[PCF50633_TI_SEC]);
-
- pcf2rtc_time(tm, &pcf_tm);
-
- dev_dbg(dev, "RTC_TIME: %ptRr\n", tm);
-
- return 0;
-}
-
-static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- struct pcf50633_rtc *rtc;
- struct pcf50633_time pcf_tm;
- int alarm_masked, ret = 0;
-
- rtc = dev_get_drvdata(dev);
-
- dev_dbg(dev, "RTC_TIME: %ptRr\n", tm);
-
- rtc2pcf_time(&pcf_tm, tm);
-
- dev_dbg(dev, "PCF_TIME: %02x.%02x.%02x %02x:%02x:%02x\n",
- pcf_tm.time[PCF50633_TI_DAY],
- pcf_tm.time[PCF50633_TI_MONTH],
- pcf_tm.time[PCF50633_TI_YEAR],
- pcf_tm.time[PCF50633_TI_HOUR],
- pcf_tm.time[PCF50633_TI_MIN],
- pcf_tm.time[PCF50633_TI_SEC]);
-
-
- alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);
-
- if (!alarm_masked)
- pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
-
- /* Returns 0 on success */
- ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSC,
- PCF50633_TI_EXTENT,
- &pcf_tm.time[0]);
-
- if (!alarm_masked)
- pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
-
- return ret;
-}
-
-static int pcf50633_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct pcf50633_rtc *rtc;
- struct pcf50633_time pcf_tm;
- int ret = 0;
-
- rtc = dev_get_drvdata(dev);
-
- alrm->enabled = rtc->alarm_enabled;
- alrm->pending = rtc->alarm_pending;
-
- ret = pcf50633_read_block(rtc->pcf, PCF50633_REG_RTCSCA,
- PCF50633_TI_EXTENT, &pcf_tm.time[0]);
- if (ret != PCF50633_TI_EXTENT) {
- dev_err(dev, "Failed to read time\n");
- return -EIO;
- }
-
- pcf2rtc_time(&alrm->time, &pcf_tm);
-
- return rtc_valid_tm(&alrm->time);
-}
-
-static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct pcf50633_rtc *rtc;
- struct pcf50633_time pcf_tm;
- int alarm_masked, ret = 0;
-
- rtc = dev_get_drvdata(dev);
-
- rtc2pcf_time(&pcf_tm, &alrm->time);
-
- /* do like mktime does and ignore tm_wday */
- pcf_tm.time[PCF50633_TI_WKDAY] = 7;
-
- alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);
-
- /* disable alarm interrupt */
- if (!alarm_masked)
- pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
-
- /* Returns 0 on success */
- ret = pcf50633_write_block(rtc->pcf, PCF50633_REG_RTCSCA,
- PCF50633_TI_EXTENT, &pcf_tm.time[0]);
- if (!alrm->enabled)
- rtc->alarm_pending = 0;
-
- if (!alarm_masked || alrm->enabled)
- pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
- rtc->alarm_enabled = alrm->enabled;
-
- return ret;
-}
-
-static const struct rtc_class_ops pcf50633_rtc_ops = {
- .read_time = pcf50633_rtc_read_time,
- .set_time = pcf50633_rtc_set_time,
- .read_alarm = pcf50633_rtc_read_alarm,
- .set_alarm = pcf50633_rtc_set_alarm,
- .alarm_irq_enable = pcf50633_rtc_alarm_irq_enable,
-};
-
-static void pcf50633_rtc_irq(int irq, void *data)
-{
- struct pcf50633_rtc *rtc = data;
-
- rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
- rtc->alarm_pending = 1;
-}
-
-static int pcf50633_rtc_probe(struct platform_device *pdev)
-{
- struct pcf50633_rtc *rtc;
-
- rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
- if (!rtc)
- return -ENOMEM;
-
- rtc->pcf = dev_to_pcf50633(pdev->dev.parent);
- platform_set_drvdata(pdev, rtc);
- rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, "pcf50633-rtc",
- &pcf50633_rtc_ops, THIS_MODULE);
-
- if (IS_ERR(rtc->rtc_dev))
- return PTR_ERR(rtc->rtc_dev);
-
- pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM,
- pcf50633_rtc_irq, rtc);
- return 0;
-}
-
-static void pcf50633_rtc_remove(struct platform_device *pdev)
-{
- struct pcf50633_rtc *rtc;
-
- rtc = platform_get_drvdata(pdev);
- pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM);
-}
-
-static struct platform_driver pcf50633_rtc_driver = {
- .driver = {
- .name = "pcf50633-rtc",
- },
- .probe = pcf50633_rtc_probe,
- .remove = pcf50633_rtc_remove,
-};
-
-module_platform_driver(pcf50633_rtc_driver);
-
-MODULE_DESCRIPTION("PCF50633 RTC driver");
-MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 905986c61655..4fa5c4ecdd5a 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -35,6 +35,7 @@
#define PCF85063_REG_CTRL1_CAP_SEL BIT(0)
#define PCF85063_REG_CTRL1_STOP BIT(5)
#define PCF85063_REG_CTRL1_EXT_TEST BIT(7)
+#define PCF85063_REG_CTRL1_SWR 0x58
#define PCF85063_REG_CTRL2 0x01
#define PCF85063_CTRL2_AF BIT(6)
@@ -589,16 +590,30 @@ static int pcf85063_probe(struct i2c_client *client)
i2c_set_clientdata(client, pcf85063);
- err = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL1, &tmp);
- if (err) {
- dev_err(&client->dev, "RTC chip is not present\n");
- return err;
- }
+ err = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &tmp);
+ if (err)
+ return dev_err_probe(&client->dev, err, "RTC chip is not present\n");
pcf85063->rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(pcf85063->rtc))
return PTR_ERR(pcf85063->rtc);
+ /*
+ * If a Power loss is detected, SW reset the device.
+ * From PCF85063A datasheet:
+ * There is a low probability that some devices will have corruption
+ * of the registers after the automatic power-on reset...
+ */
+ if (tmp & PCF85063_REG_SC_OS) {
+ dev_warn(&client->dev,
+ "POR issue detected, sending a SW reset\n");
+ err = regmap_write(pcf85063->regmap, PCF85063_REG_CTRL1,
+ PCF85063_REG_CTRL1_SWR);
+ if (err < 0)
+ dev_warn(&client->dev,
+ "SW reset failed, trying to continue\n");
+ }
+
err = pcf85063_load_capacitance(pcf85063, client->dev.of_node,
config->force_cap_7000 ? 7000 : 0);
if (err < 0)
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 39038c0754ee..5caaa714f448 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -21,7 +21,6 @@
#define RTC_CR_MIE (1 << 0)
struct pl030_rtc {
- struct rtc_device *rtc;
void __iomem *base;
};
@@ -86,6 +85,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl030_rtc *rtc;
int ret;
+ struct rtc_device *rtc_dev;
ret = amba_request_regions(dev, NULL);
if (ret)
@@ -97,14 +97,14 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
goto err_rtc;
}
- rtc->rtc = devm_rtc_allocate_device(&dev->dev);
- if (IS_ERR(rtc->rtc)) {
- ret = PTR_ERR(rtc->rtc);
+ rtc_dev = devm_rtc_allocate_device(&dev->dev);
+ if (IS_ERR(rtc_dev)) {
+ ret = PTR_ERR(rtc_dev);
goto err_rtc;
}
- rtc->rtc->ops = &pl030_ops;
- rtc->rtc->range_max = U32_MAX;
+ rtc_dev->ops = &pl030_ops;
+ rtc_dev->range_max = U32_MAX;
rtc->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!rtc->base) {
ret = -ENOMEM;
@@ -121,7 +121,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
if (ret)
goto err_irq;
- ret = devm_rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc_dev);
if (ret)
goto err_reg;
@@ -148,7 +148,7 @@ static void pl030_remove(struct amba_device *dev)
amba_release_regions(dev);
}
-static struct amba_id pl030_ids[] = {
+static const struct amba_id pl030_ids[] = {
{
.id = 0x00041030,
.mask = 0x000fffff,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index bad6a5d9c683..eab39dfa4e5f 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -74,6 +74,8 @@
* @st_weekday: if this is an ST Microelectronics silicon version that need
* the weekday fix
* @irqflags: special IRQ flags per variant
+ * @range_min: minimum date/time supported by the RTC
+ * @range_max: maximum date/time supported by the RTC
*/
struct pl031_vendor_data {
struct rtc_class_ops ops;
@@ -284,8 +286,6 @@ static void pl031_remove(struct amba_device *adev)
{
struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
- dev_pm_clear_wake_irq(&adev->dev);
- device_init_wakeup(&adev->dev, false);
if (adev->irq[0])
free_irq(adev->irq[0], ldata);
amba_release_regions(adev);
@@ -350,7 +350,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
- device_init_wakeup(&adev->dev, true);
+ devm_device_init_wakeup(&adev->dev);
ldata->rtc = devm_rtc_allocate_device(&adev->dev);
if (IS_ERR(ldata->rtc)) {
ret = PTR_ERR(ldata->rtc);
@@ -373,7 +373,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
vendor->irqflags, "rtc-pl031", ldata);
if (ret)
goto out;
- dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
+ devm_pm_set_wake_irq(&adev->dev, adev->irq[0]);
}
return 0;
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index b2518aea4218..3c1dddcc81df 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -5,6 +5,7 @@
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
* Copyright (c) 2023, Linaro Limited
*/
+#include <linux/efi.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
@@ -16,9 +17,10 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-
#include <linux/unaligned.h>
+#include <asm/byteorder.h>
+
/* RTC_CTRL register bit fields */
#define PM8xxx_RTC_ENABLE BIT(7)
#define PM8xxx_RTC_ALARM_CLEAR BIT(0)
@@ -46,28 +48,125 @@ struct pm8xxx_rtc_regs {
unsigned int alarm_en;
};
+struct qcom_uefi_rtc_info {
+ __le32 offset_gps;
+ u8 reserved[8];
+} __packed;
+
/**
* struct pm8xxx_rtc - RTC driver internal structure
* @rtc: RTC device
* @regmap: regmap used to access registers
* @allow_set_time: whether the time can be set
+ * @use_uefi: use UEFI variable as fallback for offset
* @alarm_irq: alarm irq number
* @regs: register description
* @dev: device structure
+ * @rtc_info: qcom uefi rtc-info structure
* @nvmem_cell: nvmem cell for offset
* @offset: offset from epoch in seconds
+ * @offset_dirty: offset needs to be stored on shutdown
*/
struct pm8xxx_rtc {
struct rtc_device *rtc;
struct regmap *regmap;
bool allow_set_time;
+ bool use_uefi;
int alarm_irq;
const struct pm8xxx_rtc_regs *regs;
struct device *dev;
+ struct qcom_uefi_rtc_info rtc_info;
struct nvmem_cell *nvmem_cell;
u32 offset;
+ bool offset_dirty;
};
+#ifdef CONFIG_EFI
+
+MODULE_IMPORT_NS("EFIVAR");
+
+#define QCOM_UEFI_NAME L"RTCInfo"
+#define QCOM_UEFI_GUID EFI_GUID(0x882f8c2b, 0x9646, 0x435f, \
+ 0x8d, 0xe5, 0xf2, 0x08, 0xff, 0x80, 0xc1, 0xbd)
+#define QCOM_UEFI_ATTRS (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+ EFI_VARIABLE_RUNTIME_ACCESS)
+
+static int pm8xxx_rtc_read_uefi_offset(struct pm8xxx_rtc *rtc_dd)
+{
+ struct qcom_uefi_rtc_info *rtc_info = &rtc_dd->rtc_info;
+ unsigned long size = sizeof(*rtc_info);
+ struct device *dev = rtc_dd->dev;
+ efi_status_t status;
+ u32 offset_gps;
+ int rc;
+
+ rc = efivar_lock();
+ if (rc)
+ return rc;
+
+ status = efivar_get_variable(QCOM_UEFI_NAME, &QCOM_UEFI_GUID, NULL,
+ &size, rtc_info);
+ efivar_unlock();
+
+ if (status != EFI_SUCCESS) {
+ dev_dbg(dev, "failed to read UEFI offset: %lu\n", status);
+ return efi_status_to_err(status);
+ }
+
+ if (size != sizeof(*rtc_info)) {
+ dev_dbg(dev, "unexpected UEFI structure size %lu\n", size);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "uefi_rtc_info = %*ph\n", (int)size, rtc_info);
+
+ /* Convert from GPS to Unix time offset */
+ offset_gps = le32_to_cpu(rtc_info->offset_gps);
+ rtc_dd->offset = offset_gps + (u32)RTC_TIMESTAMP_EPOCH_GPS;
+
+ return 0;
+}
+
+static int pm8xxx_rtc_write_uefi_offset(struct pm8xxx_rtc *rtc_dd, u32 offset)
+{
+ struct qcom_uefi_rtc_info *rtc_info = &rtc_dd->rtc_info;
+ unsigned long size = sizeof(*rtc_info);
+ struct device *dev = rtc_dd->dev;
+ efi_status_t status;
+ u32 offset_gps;
+
+ /* Convert from Unix to GPS time offset */
+ offset_gps = offset - (u32)RTC_TIMESTAMP_EPOCH_GPS;
+
+ rtc_info->offset_gps = cpu_to_le32(offset_gps);
+
+ dev_dbg(dev, "efi_rtc_info = %*ph\n", (int)size, rtc_info);
+
+ status = efivar_set_variable(QCOM_UEFI_NAME, &QCOM_UEFI_GUID,
+ QCOM_UEFI_ATTRS, size, rtc_info);
+ if (status != EFI_SUCCESS) {
+ dev_dbg(dev, "failed to write UEFI offset: %lx\n", status);
+ return efi_status_to_err(status);
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_EFI */
+
+static int pm8xxx_rtc_read_uefi_offset(struct pm8xxx_rtc *rtc_dd)
+{
+ return -ENODEV;
+}
+
+static int pm8xxx_rtc_write_uefi_offset(struct pm8xxx_rtc *rtc_dd, u32 offset)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_EFI */
+
static int pm8xxx_rtc_read_nvmem_offset(struct pm8xxx_rtc *rtc_dd)
{
size_t len;
@@ -110,14 +209,6 @@ static int pm8xxx_rtc_write_nvmem_offset(struct pm8xxx_rtc *rtc_dd, u32 offset)
return 0;
}
-static int pm8xxx_rtc_read_offset(struct pm8xxx_rtc *rtc_dd)
-{
- if (!rtc_dd->nvmem_cell)
- return 0;
-
- return pm8xxx_rtc_read_nvmem_offset(rtc_dd);
-}
-
static int pm8xxx_rtc_read_raw(struct pm8xxx_rtc *rtc_dd, u32 *secs)
{
const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
@@ -155,7 +246,7 @@ static int pm8xxx_rtc_update_offset(struct pm8xxx_rtc *rtc_dd, u32 secs)
u32 offset;
int rc;
- if (!rtc_dd->nvmem_cell)
+ if (!rtc_dd->nvmem_cell && !rtc_dd->use_uefi)
return -ENODEV;
rc = pm8xxx_rtc_read_raw(rtc_dd, &raw_secs);
@@ -167,10 +258,25 @@ static int pm8xxx_rtc_update_offset(struct pm8xxx_rtc *rtc_dd, u32 secs)
if (offset == rtc_dd->offset)
return 0;
- rc = pm8xxx_rtc_write_nvmem_offset(rtc_dd, offset);
+ /*
+ * Reduce flash wear by deferring updates due to clock drift until
+ * shutdown.
+ */
+ if (abs_diff(offset, rtc_dd->offset) < 30) {
+ rtc_dd->offset_dirty = true;
+ goto out;
+ }
+
+ if (rtc_dd->nvmem_cell)
+ rc = pm8xxx_rtc_write_nvmem_offset(rtc_dd, offset);
+ else
+ rc = pm8xxx_rtc_write_uefi_offset(rtc_dd, offset);
+
if (rc)
return rc;
+ rtc_dd->offset_dirty = false;
+out:
rtc_dd->offset = offset;
return 0;
@@ -455,6 +561,30 @@ static const struct of_device_id pm8xxx_id_table[] = {
};
MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
+static int pm8xxx_rtc_probe_offset(struct pm8xxx_rtc *rtc_dd)
+{
+ int rc;
+
+ rtc_dd->nvmem_cell = devm_nvmem_cell_get(rtc_dd->dev, "offset");
+ if (IS_ERR(rtc_dd->nvmem_cell)) {
+ rc = PTR_ERR(rtc_dd->nvmem_cell);
+ if (rc != -ENOENT)
+ return rc;
+ rtc_dd->nvmem_cell = NULL;
+ } else {
+ return pm8xxx_rtc_read_nvmem_offset(rtc_dd);
+ }
+
+ /* Use UEFI storage as fallback if available */
+ if (efivar_is_available()) {
+ rc = pm8xxx_rtc_read_uefi_offset(rtc_dd);
+ if (rc == 0)
+ rtc_dd->use_uefi = true;
+ }
+
+ return 0;
+}
+
static int pm8xxx_rtc_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
@@ -469,30 +599,23 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
if (rtc_dd == NULL)
return -ENOMEM;
+ rtc_dd->regs = match->data;
+ rtc_dd->dev = &pdev->dev;
+
rtc_dd->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!rtc_dd->regmap)
return -ENXIO;
- rtc_dd->alarm_irq = platform_get_irq(pdev, 0);
- if (rtc_dd->alarm_irq < 0)
- return -ENXIO;
+ if (!of_property_read_bool(pdev->dev.of_node, "qcom,no-alarm")) {
+ rtc_dd->alarm_irq = platform_get_irq(pdev, 0);
+ if (rtc_dd->alarm_irq < 0)
+ return -ENXIO;
+ }
rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node,
"allow-set-time");
-
- rtc_dd->nvmem_cell = devm_nvmem_cell_get(&pdev->dev, "offset");
- if (IS_ERR(rtc_dd->nvmem_cell)) {
- rc = PTR_ERR(rtc_dd->nvmem_cell);
- if (rc != -ENOENT)
- return rc;
- rtc_dd->nvmem_cell = NULL;
- }
-
- rtc_dd->regs = match->data;
- rtc_dd->dev = &pdev->dev;
-
if (!rtc_dd->allow_set_time) {
- rc = pm8xxx_rtc_read_offset(rtc_dd);
+ rc = pm8xxx_rtc_probe_offset(rtc_dd);
if (rc)
return rc;
}
@@ -503,8 +626,6 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc_dd);
- device_init_wakeup(&pdev->dev, true);
-
rtc_dd->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc_dd->rtc))
return PTR_ERR(rtc_dd->rtc);
@@ -512,32 +633,41 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
rtc_dd->rtc->ops = &pm8xxx_rtc_ops;
rtc_dd->rtc->range_max = U32_MAX;
- rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->alarm_irq,
- pm8xxx_alarm_trigger,
- IRQF_TRIGGER_RISING,
- "pm8xxx_rtc_alarm", rtc_dd);
- if (rc < 0)
- return rc;
+ if (rtc_dd->alarm_irq) {
+ rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->alarm_irq,
+ pm8xxx_alarm_trigger,
+ IRQF_TRIGGER_RISING,
+ "pm8xxx_rtc_alarm", rtc_dd);
+ if (rc < 0)
+ return rc;
- rc = devm_rtc_register_device(rtc_dd->rtc);
- if (rc)
- return rc;
+ rc = devm_pm_set_wake_irq(&pdev->dev, rtc_dd->alarm_irq);
+ if (rc)
+ return rc;
- rc = dev_pm_set_wake_irq(&pdev->dev, rtc_dd->alarm_irq);
- if (rc)
- return rc;
+ devm_device_init_wakeup(&pdev->dev);
+ } else {
+ clear_bit(RTC_FEATURE_ALARM, rtc_dd->rtc->features);
+ }
- return 0;
+ return devm_rtc_register_device(rtc_dd->rtc);
}
-static void pm8xxx_remove(struct platform_device *pdev)
+static void pm8xxx_shutdown(struct platform_device *pdev)
{
- dev_pm_clear_wake_irq(&pdev->dev);
+ struct pm8xxx_rtc *rtc_dd = platform_get_drvdata(pdev);
+
+ if (rtc_dd->offset_dirty) {
+ if (rtc_dd->nvmem_cell)
+ pm8xxx_rtc_write_nvmem_offset(rtc_dd, rtc_dd->offset);
+ else
+ pm8xxx_rtc_write_uefi_offset(rtc_dd, rtc_dd->offset);
+ }
}
static struct platform_driver pm8xxx_rtc_driver = {
.probe = pm8xxx_rtc_probe,
- .remove = pm8xxx_remove,
+ .shutdown = pm8xxx_shutdown,
.driver = {
.name = "rtc-pm8xxx",
.of_match_table = pm8xxx_id_table,
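
The UEFI fallback added above is plain use of the efivar helpers exported in the EFIVAR namespace; reading any vendor variable follows the same lock, get, convert-status sequence. A stand-alone sketch with a placeholder variable name and an all-zero GUID (the real driver uses the Qualcomm RTCInfo variable shown in the hunk):

#include <linux/efi.h>
#include <linux/module.h>

MODULE_IMPORT_NS("EFIVAR");

#define DEMO_VAR_NAME	L"DemoVar"
#define DEMO_VAR_GUID	EFI_GUID(0x00000000, 0x0000, 0x0000, \
				 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)

static int demo_read_var(void *buf, unsigned long *size)
{
	efi_status_t status;
	int rc;

	if (!efivar_is_available())
		return -ENODEV;

	rc = efivar_lock();
	if (rc)
		return rc;

	status = efivar_get_variable(DEMO_VAR_NAME, &DEMO_VAR_GUID,
				     NULL, size, buf);
	efivar_unlock();

	/* Map EFI_* status codes onto regular errnos. */
	return status == EFI_SUCCESS ? 0 : efi_status_to_err(status);
}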
diff --git a/drivers/rtc/rtc-renesas-rtca3.c b/drivers/rtc/rtc-renesas-rtca3.c
index a056291d3887..ab816bdf0d77 100644
--- a/drivers/rtc/rtc-renesas-rtca3.c
+++ b/drivers/rtc/rtc-renesas-rtca3.c
@@ -586,17 +586,14 @@ static int rtca3_initial_setup(struct clk *clk, struct rtca3_priv *priv)
*/
usleep_range(sleep_us, sleep_us + 10);
- /* Disable all interrupts. */
- mask = RTCA3_RCR1_AIE | RTCA3_RCR1_CIE | RTCA3_RCR1_PIE;
- ret = rtca3_alarm_irq_set_helper(priv, mask, 0);
- if (ret)
- return ret;
-
mask = RTCA3_RCR2_START | RTCA3_RCR2_HR24;
val = readb(priv->base + RTCA3_RCR2);
- /* Nothing to do if already started in 24 hours and calendar count mode. */
- if ((val & mask) == mask)
- return 0;
+ /* Only disable the interrupts if already started in 24 hours and calendar count mode. */
+ if ((val & mask) == mask) {
+ /* Disable all interrupts. */
+ mask = RTCA3_RCR1_AIE | RTCA3_RCR1_CIE | RTCA3_RCR1_PIE;
+ return rtca3_alarm_irq_set_helper(priv, mask, 0);
+ }
/* Reconfigure the RTC in 24 hours and calendar count mode. */
mask = RTCA3_RCR2_START | RTCA3_RCR2_CNTMD;
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index 35b2e36b426a..2c6a8918acba 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -69,8 +69,7 @@
#define RV3032_CLKOUT2_FD_MSK GENMASK(6, 5)
#define RV3032_CLKOUT2_OS BIT(7)
-#define RV3032_CTRL1_EERD BIT(3)
-#define RV3032_CTRL1_WADA BIT(5)
+#define RV3032_CTRL1_EERD BIT(2)
#define RV3032_CTRL2_STOP BIT(0)
#define RV3032_CTRL2_EIE BIT(2)
@@ -947,11 +946,6 @@ static int rv3032_probe(struct i2c_client *client)
if (!client->irq)
clear_bit(RTC_FEATURE_ALARM, rv3032->rtc->features);
- ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL1,
- RV3032_CTRL1_WADA, RV3032_CTRL1_WADA);
- if (ret)
- return ret;
-
rv3032_trickle_charger_setup(&client->dev, rv3032);
set_bit(RTC_FEATURE_BACKUP_SWITCH_MODE, rv3032->rtc->features);
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index b18c12887bdc..20c2dff01bae 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -52,11 +52,6 @@
#define RX8571_USER_RAM 0x10
#define RX8571_NVRAM_SIZE 0x10
-struct rx8581 {
- struct regmap *regmap;
- struct rtc_device *rtc;
-};
-
struct rx85x1_config {
struct regmap_config regmap;
unsigned int num_nvram;
@@ -72,14 +67,14 @@ static int rx8581_rtc_read_time(struct device *dev, struct rtc_time *tm)
unsigned char date[7];
unsigned int data;
int err;
- struct rx8581 *rx8581 = i2c_get_clientdata(client);
+ struct regmap *regmap = i2c_get_clientdata(client);
/* First we ensure that the "update flag" is not set, we read the
* time and date then re-read the "update flag". If the update flag
* has been set, we know that the time has changed during the read so
* we repeat the whole process again.
*/
- err = regmap_read(rx8581->regmap, RX8581_REG_FLAG, &data);
+ err = regmap_read(regmap, RX8581_REG_FLAG, &data);
if (err < 0)
return err;
@@ -92,20 +87,20 @@ static int rx8581_rtc_read_time(struct device *dev, struct rtc_time *tm)
do {
/* If update flag set, clear it */
if (data & RX8581_FLAG_UF) {
- err = regmap_write(rx8581->regmap, RX8581_REG_FLAG,
- data & ~RX8581_FLAG_UF);
+ err = regmap_write(regmap, RX8581_REG_FLAG,
+ data & ~RX8581_FLAG_UF);
if (err < 0)
return err;
}
/* Now read time and date */
- err = regmap_bulk_read(rx8581->regmap, RX8581_REG_SC, date,
+ err = regmap_bulk_read(regmap, RX8581_REG_SC, date,
sizeof(date));
if (err < 0)
return err;
/* Check flag register */
- err = regmap_read(rx8581->regmap, RX8581_REG_FLAG, &data);
+ err = regmap_read(regmap, RX8581_REG_FLAG, &data);
if (err < 0)
return err;
} while (data & RX8581_FLAG_UF);
@@ -137,7 +132,7 @@ static int rx8581_rtc_set_time(struct device *dev, struct rtc_time *tm)
struct i2c_client *client = to_i2c_client(dev);
int err;
unsigned char buf[7];
- struct rx8581 *rx8581 = i2c_get_clientdata(client);
+ struct regmap *regmap = i2c_get_clientdata(client);
dev_dbg(dev, "%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -160,25 +155,23 @@ static int rx8581_rtc_set_time(struct device *dev, struct rtc_time *tm)
buf[RX8581_REG_DW] = (0x1 << tm->tm_wday);
/* Stop the clock */
- err = regmap_update_bits(rx8581->regmap, RX8581_REG_CTRL,
+ err = regmap_update_bits(regmap, RX8581_REG_CTRL,
RX8581_CTRL_STOP, RX8581_CTRL_STOP);
if (err < 0)
return err;
/* write register's data */
- err = regmap_bulk_write(rx8581->regmap, RX8581_REG_SC,
- buf, sizeof(buf));
+ err = regmap_bulk_write(regmap, RX8581_REG_SC, buf, sizeof(buf));
if (err < 0)
return err;
/* get VLF and clear it */
- err = regmap_update_bits(rx8581->regmap, RX8581_REG_FLAG,
- RX8581_FLAG_VLF, 0);
+ err = regmap_update_bits(regmap, RX8581_REG_FLAG, RX8581_FLAG_VLF, 0);
if (err < 0)
return err;
/* Restart the clock */
- return regmap_update_bits(rx8581->regmap, RX8581_REG_CTRL,
+ return regmap_update_bits(regmap, RX8581_REG_CTRL,
RX8581_CTRL_STOP, 0);
}
@@ -190,29 +183,27 @@ static const struct rtc_class_ops rx8581_rtc_ops = {
static int rx8571_nvram_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- struct rx8581 *rx8581 = priv;
+ struct regmap *regmap = priv;
- return regmap_bulk_read(rx8581->regmap, RX8571_USER_RAM + offset,
- val, bytes);
+ return regmap_bulk_read(regmap, RX8571_USER_RAM + offset, val, bytes);
}
static int rx8571_nvram_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- struct rx8581 *rx8581 = priv;
+ struct regmap *regmap = priv;
- return regmap_bulk_write(rx8581->regmap, RX8571_USER_RAM + offset,
- val, bytes);
+ return regmap_bulk_write(regmap, RX8571_USER_RAM + offset, val, bytes);
}
static int rx85x1_nvram_read(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- struct rx8581 *rx8581 = priv;
+ struct regmap *regmap = priv;
unsigned int tmp_val;
int ret;
- ret = regmap_read(rx8581->regmap, RX8581_REG_RAM, &tmp_val);
+ ret = regmap_read(regmap, RX8581_REG_RAM, &tmp_val);
(*(unsigned char *)val) = (unsigned char) tmp_val;
return ret;
@@ -221,12 +212,11 @@ static int rx85x1_nvram_read(void *priv, unsigned int offset, void *val,
static int rx85x1_nvram_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
- struct rx8581 *rx8581 = priv;
+ struct regmap *regmap = priv;
unsigned char tmp_val;
tmp_val = *((unsigned char *)val);
- return regmap_write(rx8581->regmap, RX8581_REG_RAM,
- (unsigned int)tmp_val);
+ return regmap_write(regmap, RX8581_REG_RAM, (unsigned int)tmp_val);
}
static const struct rx85x1_config rx8581_config = {
@@ -249,9 +239,10 @@ static const struct rx85x1_config rx8571_config = {
static int rx8581_probe(struct i2c_client *client)
{
- struct rx8581 *rx8581;
+ struct regmap *regmap;
const struct rx85x1_config *config = &rx8581_config;
const void *data = of_device_get_match_data(&client->dev);
+ struct rtc_device *rtc;
static struct nvmem_config nvmem_cfg[] = {
{
.name = "rx85x1-",
@@ -276,31 +267,27 @@ static int rx8581_probe(struct i2c_client *client)
if (data)
config = data;
- rx8581 = devm_kzalloc(&client->dev, sizeof(struct rx8581), GFP_KERNEL);
- if (!rx8581)
- return -ENOMEM;
-
- i2c_set_clientdata(client, rx8581);
+ regmap = devm_regmap_init_i2c(client, &config->regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
- rx8581->regmap = devm_regmap_init_i2c(client, &config->regmap);
- if (IS_ERR(rx8581->regmap))
- return PTR_ERR(rx8581->regmap);
+ i2c_set_clientdata(client, regmap);
- rx8581->rtc = devm_rtc_allocate_device(&client->dev);
- if (IS_ERR(rx8581->rtc))
- return PTR_ERR(rx8581->rtc);
+ rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- rx8581->rtc->ops = &rx8581_rtc_ops;
- rx8581->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
- rx8581->rtc->range_max = RTC_TIMESTAMP_END_2099;
- rx8581->rtc->start_secs = 0;
- rx8581->rtc->set_start_time = true;
+ rtc->ops = &rx8581_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->start_secs = 0;
+ rtc->set_start_time = true;
- ret = devm_rtc_register_device(rx8581->rtc);
+ ret = devm_rtc_register_device(rtc);
for (i = 0; i < config->num_nvram; i++) {
- nvmem_cfg[i].priv = rx8581;
- devm_rtc_nvmem_register(rx8581->rtc, &nvmem_cfg[i]);
+ nvmem_cfg[i].priv = regmap;
+ devm_rtc_nvmem_register(rtc, &nvmem_cfg[i]);
}
return ret;
diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c
index cb220807d925..eeb9612a666f 100644
--- a/drivers/rtc/rtc-rzn1.c
+++ b/drivers/rtc/rtc-rzn1.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtc.h>
+#include <linux/spinlock.h>
#define RZN1_RTC_CTL0 0x00
#define RZN1_RTC_CTL0_SLSB_SUBU 0
@@ -27,6 +28,7 @@
#define RZN1_RTC_CTL0_CE BIT(7)
#define RZN1_RTC_CTL1 0x04
+#define RZN1_RTC_CTL1_1SE BIT(3)
#define RZN1_RTC_CTL1_ALME BIT(4)
#define RZN1_RTC_CTL2 0x08
@@ -58,6 +60,13 @@
struct rzn1_rtc {
struct rtc_device *rtcdev;
void __iomem *base;
+ /*
+ * Protects access to RZN1_RTC_CTL1 reg. rtc_lock with threaded_irqs
+ * would introduce race conditions when switching interrupts because
+ * of potential sleeps
+ */
+ spinlock_t ctl1_access_lock;
+ struct rtc_time tm_alarm;
};
static void rzn1_rtc_get_time_snapshot(struct rzn1_rtc *rtc, struct rtc_time *tm)
@@ -135,8 +144,38 @@ static int rzn1_rtc_set_time(struct device *dev, struct rtc_time *tm)
static irqreturn_t rzn1_rtc_alarm_irq(int irq, void *dev_id)
{
struct rzn1_rtc *rtc = dev_id;
+ u32 ctl1, set_irq_bits = 0;
+
+ if (rtc->tm_alarm.tm_sec == 0)
+ rtc_update_irq(rtc->rtcdev, 1, RTC_AF | RTC_IRQF);
+ else
+ /* Switch to 1s interrupts */
+ set_irq_bits = RZN1_RTC_CTL1_1SE;
- rtc_update_irq(rtc->rtcdev, 1, RTC_AF | RTC_IRQF);
+ guard(spinlock)(&rtc->ctl1_access_lock);
+
+ ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+ ctl1 &= ~RZN1_RTC_CTL1_ALME;
+ ctl1 |= set_irq_bits;
+ writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rzn1_rtc_1s_irq(int irq, void *dev_id)
+{
+ struct rzn1_rtc *rtc = dev_id;
+ u32 ctl1;
+
+ if (readl(rtc->base + RZN1_RTC_SECC) == bin2bcd(rtc->tm_alarm.tm_sec)) {
+ guard(spinlock)(&rtc->ctl1_access_lock);
+
+ ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+ ctl1 &= ~RZN1_RTC_CTL1_1SE;
+ writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+
+ rtc_update_irq(rtc->rtcdev, 1, RTC_AF | RTC_IRQF);
+ }
return IRQ_HANDLED;
}
@@ -144,14 +183,38 @@ static irqreturn_t rzn1_rtc_alarm_irq(int irq, void *dev_id)
static int rzn1_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
struct rzn1_rtc *rtc = dev_get_drvdata(dev);
- u32 ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+ struct rtc_time *tm = &rtc->tm_alarm, tm_now;
+ u32 ctl1;
+ int ret;
- if (enable)
- ctl1 |= RZN1_RTC_CTL1_ALME;
- else
- ctl1 &= ~RZN1_RTC_CTL1_ALME;
+ guard(spinlock_irqsave)(&rtc->ctl1_access_lock);
- writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+ ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
+
+ if (enable) {
+ /*
+ * Use alarm interrupt if alarm time is at least a minute away
+ * or less than a minute but in the next minute. Otherwise use
+ * 1 second interrupt to wait for the proper second
+ */
+ do {
+ ctl1 &= ~(RZN1_RTC_CTL1_ALME | RZN1_RTC_CTL1_1SE);
+
+ ret = rzn1_rtc_read_time(dev, &tm_now);
+ if (ret)
+ return ret;
+
+ if (rtc_tm_sub(tm, &tm_now) > 59 || tm->tm_min != tm_now.tm_min)
+ ctl1 |= RZN1_RTC_CTL1_ALME;
+ else
+ ctl1 |= RZN1_RTC_CTL1_1SE;
+
+ writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+ } while (readl(rtc->base + RZN1_RTC_SECC) != bin2bcd(tm_now.tm_sec));
+ } else {
+ ctl1 &= ~(RZN1_RTC_CTL1_ALME | RZN1_RTC_CTL1_1SE);
+ writel(ctl1, rtc->base + RZN1_RTC_CTL1);
+ }
return 0;
}
@@ -185,7 +248,7 @@ static int rzn1_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
ctl1 = readl(rtc->base + RZN1_RTC_CTL1);
- alrm->enabled = !!(ctl1 & RZN1_RTC_CTL1_ALME);
+ alrm->enabled = !!(ctl1 & (RZN1_RTC_CTL1_ALME | RZN1_RTC_CTL1_1SE));
return 0;
}
@@ -216,6 +279,8 @@ static int rzn1_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
writel(bin2bcd(tm->tm_hour), rtc->base + RZN1_RTC_ALH);
writel(BIT(wday), rtc->base + RZN1_RTC_ALW);
+ rtc->tm_alarm = alrm->time;
+
rzn1_rtc_alarm_irq_enable(dev, alrm->enabled);
return 0;
@@ -304,7 +369,7 @@ static const struct rtc_class_ops rzn1_rtc_ops = {
static int rzn1_rtc_probe(struct platform_device *pdev)
{
struct rzn1_rtc *rtc;
- int alarm_irq;
+ int irq;
int ret;
rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -317,9 +382,9 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
if (IS_ERR(rtc->base))
return dev_err_probe(&pdev->dev, PTR_ERR(rtc->base), "Missing reg\n");
- alarm_irq = platform_get_irq(pdev, 0);
- if (alarm_irq < 0)
- return alarm_irq;
+ irq = platform_get_irq_byname(pdev, "alarm");
+ if (irq < 0)
+ return irq;
rtc->rtcdev = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtcdev))
@@ -329,8 +394,6 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
rtc->rtcdev->alarm_offset_max = 7 * 86400;
rtc->rtcdev->ops = &rzn1_rtc_ops;
- set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features);
- clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features);
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret < 0)
@@ -349,13 +412,24 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
/* Disable all interrupts */
writel(0, rtc->base + RZN1_RTC_CTL1);
- ret = devm_request_irq(&pdev->dev, alarm_irq, rzn1_rtc_alarm_irq, 0,
- dev_name(&pdev->dev), rtc);
+ spin_lock_init(&rtc->ctl1_access_lock);
+
+ ret = devm_request_irq(&pdev->dev, irq, rzn1_rtc_alarm_irq, 0, "RZN1 RTC Alarm", rtc);
if (ret) {
- dev_err(&pdev->dev, "RTC timer interrupt not available\n");
+ dev_err(&pdev->dev, "RTC alarm interrupt not available\n");
goto dis_runtime_pm;
}
+ irq = platform_get_irq_byname_optional(pdev, "pps");
+ if (irq >= 0)
+ ret = devm_request_irq(&pdev->dev, irq, rzn1_rtc_1s_irq, 0, "RZN1 RTC 1s", rtc);
+
+ if (irq < 0 || ret) {
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features);
+ dev_warn(&pdev->dev, "RTC pps interrupt not available. Alarm has only minute accuracy\n");
+ }
+
ret = devm_rtc_register_device(rtc->rtcdev);
if (ret)
goto dis_runtime_pm;
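
The rtc-rzn1 hunks above get second accuracy out of an alarm block that only matches down to the minute: the ALME interrupt fires on the minute boundary, and the per-second "pps" interrupt takes over for alarms inside the current minute, raising RTC_AF once SECC reaches the requested second. A minimal sketch of that selection logic, using the real rtc_tm_sub() helper but hypothetical arm_minute_alarm()/arm_second_tick() stand-ins for the RZN1_RTC_CTL1 writes:

	/* arm_minute_alarm()/arm_second_tick() are hypothetical helpers. */
	static void choose_alarm_source(struct device *dev,
					struct rtc_time *alarm,
					struct rtc_time *now)
	{
		if (rtc_tm_sub(alarm, now) > 59 || alarm->tm_min != now->tm_min)
			arm_minute_alarm(dev);	/* hardware matches the minute */
		else
			arm_second_tick(dev);	/* IRQ handler polls SECC */
	}

The do/while loop around the real register writes re-reads SECC afterwards, so a second rollover during the update cannot leave the wrong interrupt source armed.
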
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index e3dc18882f41..3408d2ab2741 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -63,7 +63,6 @@ MODULE_DEVICE_TABLE(of, s35390a_of_match);
struct s35390a {
struct i2c_client *client[8];
- struct rtc_device *rtc;
int twentyfourhour;
};
@@ -422,6 +421,7 @@ static int s35390a_probe(struct i2c_client *client)
int err, err_read;
unsigned int i;
struct s35390a *s35390a;
+ struct rtc_device *rtc;
char buf, status1;
struct device *dev = &client->dev;
@@ -447,9 +447,9 @@ static int s35390a_probe(struct i2c_client *client)
}
}
- s35390a->rtc = devm_rtc_allocate_device(dev);
- if (IS_ERR(s35390a->rtc))
- return PTR_ERR(s35390a->rtc);
+ rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
err_read = s35390a_read_status(s35390a, &status1);
if (err_read < 0) {
@@ -480,17 +480,17 @@ static int s35390a_probe(struct i2c_client *client)
device_set_wakeup_capable(dev, 1);
- s35390a->rtc->ops = &s35390a_rtc_ops;
- s35390a->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
- s35390a->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->ops = &s35390a_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
- set_bit(RTC_FEATURE_ALARM_RES_MINUTE, s35390a->rtc->features);
- clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, s35390a->rtc->features );
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
if (status1 & S35390A_FLAG_INT2)
- rtc_update_irq(s35390a->rtc, 1, RTC_AF);
+ rtc_update_irq(rtc, 1, RTC_AF);
- return devm_rtc_register_device(s35390a->rtc);
+ return devm_rtc_register_device(rtc);
}
static struct i2c_driver s35390a_driver = {
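
The s35390a change is a cleanup that recurs through this series: once allocation and registration are devm-managed, nothing outside probe needs the rtc_device pointer, so it can live in a local variable instead of the private struct. A sketch of the resulting probe shape, with example_rtc_probe/example_rtc_ops as placeholder names:

	#include <linux/i2c.h>
	#include <linux/rtc.h>

	static const struct rtc_class_ops example_rtc_ops;

	static int example_rtc_probe(struct i2c_client *client)
	{
		struct rtc_device *rtc;

		rtc = devm_rtc_allocate_device(&client->dev);
		if (IS_ERR(rtc))
			return PTR_ERR(rtc);

		rtc->ops = &example_rtc_ops;
		rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
		rtc->range_max = RTC_TIMESTAMP_END_2099;

		/* devm keeps the device registered until the driver unbinds */
		return devm_rtc_register_device(rtc);
	}
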
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 36acca5b2639..db5c9b641277 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -146,7 +146,6 @@ static const struct s5m_rtc_reg_config s2mps15_rtc_regs = {
struct s5m_rtc_info {
struct device *dev;
- struct i2c_client *i2c;
struct sec_pmic_dev *s5m87xx;
struct regmap *regmap;
struct rtc_device *rtc_dev;
@@ -627,11 +626,10 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
}
info->rtc_24hr_mode = 1;
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
- __func__, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(info->dev, ret,
+ "%s: fail to write controlm reg\n",
+ __func__);
return ret;
}
@@ -640,6 +638,7 @@ static int s5m_rtc_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *s5m87xx = dev_get_drvdata(pdev->dev.parent);
struct s5m_rtc_info *info;
+ struct i2c_client *i2c;
const struct regmap_config *regmap_cfg;
int ret, alarm_irq;
@@ -669,26 +668,21 @@ static int s5m_rtc_probe(struct platform_device *pdev)
alarm_irq = S5M8767_IRQ_RTCA1;
break;
default:
- dev_err(&pdev->dev,
- "Device type %lu is not supported by RTC driver\n",
- platform_get_device_id(pdev)->driver_data);
- return -ENODEV;
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Device type %lu is not supported by RTC driver\n",
+ platform_get_device_id(pdev)->driver_data);
}
- info->i2c = devm_i2c_new_dummy_device(&pdev->dev, s5m87xx->i2c->adapter,
- RTC_I2C_ADDR);
- if (IS_ERR(info->i2c)) {
- dev_err(&pdev->dev, "Failed to allocate I2C for RTC\n");
- return PTR_ERR(info->i2c);
- }
+ i2c = devm_i2c_new_dummy_device(&pdev->dev, s5m87xx->i2c->adapter,
+ RTC_I2C_ADDR);
+ if (IS_ERR(i2c))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c),
+ "Failed to allocate I2C for RTC\n");
- info->regmap = devm_regmap_init_i2c(info->i2c, regmap_cfg);
- if (IS_ERR(info->regmap)) {
- ret = PTR_ERR(info->regmap);
- dev_err(&pdev->dev, "Failed to allocate RTC register map: %d\n",
- ret);
- return ret;
- }
+ info->regmap = devm_regmap_init_i2c(i2c, regmap_cfg);
+ if (IS_ERR(info->regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->regmap),
+ "Failed to allocate RTC register map\n");
info->dev = &pdev->dev;
info->s5m87xx = s5m87xx;
@@ -696,11 +690,10 @@ static int s5m_rtc_probe(struct platform_device *pdev)
if (s5m87xx->irq_data) {
info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq);
- if (info->irq <= 0) {
- dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n",
- alarm_irq);
- return -EINVAL;
- }
+ if (info->irq <= 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Failed to get virtual IRQ %d\n",
+ alarm_irq);
}
platform_set_drvdata(pdev, info);
@@ -724,11 +717,10 @@ static int s5m_rtc_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
s5m_rtc_alarm_irq, 0, "rtc-alarm0",
info);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
- info->irq, ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to request alarm IRQ %d\n",
+ info->irq);
device_init_wakeup(&pdev->dev, true);
}
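
The s5m hunks fold each dev_err()-plus-return block into dev_err_probe(), which logs the message, returns the error code and stays silent on -EPROBE_DEFER in a single statement. A minimal sketch, with get_example_resource() as a hypothetical acquisition step:

	#include <linux/device.h>

	static int example_probe_step(struct device *dev)
	{
		int ret = get_example_resource(dev);	/* hypothetical */

		if (ret < 0)
			return dev_err_probe(dev, ret,
					     "failed to get resource\n");

		return 0;
	}
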
diff --git a/drivers/rtc/rtc-sd2405al.c b/drivers/rtc/rtc-sd2405al.c
index d2568c3e3876..00c3033e8079 100644
--- a/drivers/rtc/rtc-sd2405al.c
+++ b/drivers/rtc/rtc-sd2405al.c
@@ -42,7 +42,6 @@
struct sd2405al {
struct device *dev;
- struct rtc_device *rtc;
struct regmap *regmap;
};
@@ -167,6 +166,7 @@ static const struct regmap_config sd2405al_regmap_conf = {
static int sd2405al_probe(struct i2c_client *client)
{
struct sd2405al *sd2405al;
+ struct rtc_device *rtc;
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
@@ -182,17 +182,17 @@ static int sd2405al_probe(struct i2c_client *client)
if (IS_ERR(sd2405al->regmap))
return PTR_ERR(sd2405al->regmap);
- sd2405al->rtc = devm_rtc_allocate_device(&client->dev);
- if (IS_ERR(sd2405al->rtc))
- return PTR_ERR(sd2405al->rtc);
+ rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- sd2405al->rtc->ops = &sd2405al_rtc_ops;
- sd2405al->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
- sd2405al->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->ops = &sd2405al_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
dev_set_drvdata(&client->dev, sd2405al);
- ret = devm_rtc_register_device(sd2405al->rtc);
+ ret = devm_rtc_register_device(rtc);
if (ret < 0)
return ret;
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index fe27b54beaad..10cc1dcfc774 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -36,11 +36,6 @@
*/
#define WRITE_PROTECT_EN 0
-struct sd3078 {
- struct rtc_device *rtc;
- struct regmap *regmap;
-};
-
/*
* In order to prevent arbitrary modification of the time register,
* when modification of the register,
@@ -49,14 +44,11 @@ struct sd3078 {
* 2. set WRITE2 bit
* 3. set WRITE3 bit
*/
-static void sd3078_enable_reg_write(struct sd3078 *sd3078)
+static void sd3078_enable_reg_write(struct regmap *regmap)
{
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL2,
- KEY_WRITE1, KEY_WRITE1);
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1,
- KEY_WRITE2, KEY_WRITE2);
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1,
- KEY_WRITE3, KEY_WRITE3);
+ regmap_update_bits(regmap, SD3078_REG_CTRL2, KEY_WRITE1, KEY_WRITE1);
+ regmap_update_bits(regmap, SD3078_REG_CTRL1, KEY_WRITE2, KEY_WRITE2);
+ regmap_update_bits(regmap, SD3078_REG_CTRL1, KEY_WRITE3, KEY_WRITE3);
}
#if WRITE_PROTECT_EN
@@ -69,14 +61,11 @@ static void sd3078_enable_reg_write(struct sd3078 *sd3078)
* 2. clear WRITE3 bit
* 3. clear WRITE1 bit
*/
-static void sd3078_disable_reg_write(struct sd3078 *sd3078)
+static void sd3078_disable_reg_write(struct regmap *regmap)
{
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1,
- KEY_WRITE2, 0);
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1,
- KEY_WRITE3, 0);
- regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL2,
- KEY_WRITE1, 0);
+ regmap_update_bits(regmap, SD3078_REG_CTRL1, KEY_WRITE2, 0);
+ regmap_update_bits(regmap, SD3078_REG_CTRL1, KEY_WRITE3, 0);
+ regmap_update_bits(regmap, SD3078_REG_CTRL2, KEY_WRITE1, 0);
}
#endif
@@ -85,11 +74,10 @@ static int sd3078_rtc_read_time(struct device *dev, struct rtc_time *tm)
unsigned char hour;
unsigned char rtc_data[NUM_TIME_REGS] = {0};
struct i2c_client *client = to_i2c_client(dev);
- struct sd3078 *sd3078 = i2c_get_clientdata(client);
+ struct regmap *regmap = i2c_get_clientdata(client);
int ret;
- ret = regmap_bulk_read(sd3078->regmap, SD3078_REG_SC, rtc_data,
- NUM_TIME_REGS);
+ ret = regmap_bulk_read(regmap, SD3078_REG_SC, rtc_data, NUM_TIME_REGS);
if (ret < 0) {
dev_err(dev, "reading from RTC failed with err:%d\n", ret);
return ret;
@@ -123,7 +111,7 @@ static int sd3078_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
unsigned char rtc_data[NUM_TIME_REGS];
struct i2c_client *client = to_i2c_client(dev);
- struct sd3078 *sd3078 = i2c_get_clientdata(client);
+ struct regmap *regmap = i2c_get_clientdata(client);
int ret;
rtc_data[SD3078_REG_SC] = bin2bcd(tm->tm_sec);
@@ -135,10 +123,10 @@ static int sd3078_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_data[SD3078_REG_YR] = bin2bcd(tm->tm_year - 100);
#if WRITE_PROTECT_EN
- sd3078_enable_reg_write(sd3078);
+ sd3078_enable_reg_write(regmap);
#endif
- ret = regmap_bulk_write(sd3078->regmap, SD3078_REG_SC, rtc_data,
+ ret = regmap_bulk_write(regmap, SD3078_REG_SC, rtc_data,
NUM_TIME_REGS);
if (ret < 0) {
dev_err(dev, "writing to RTC failed with err:%d\n", ret);
@@ -146,7 +134,7 @@ static int sd3078_rtc_set_time(struct device *dev, struct rtc_time *tm)
}
#if WRITE_PROTECT_EN
- sd3078_disable_reg_write(sd3078);
+ sd3078_disable_reg_write(regmap);
#endif
return 0;
@@ -166,36 +154,33 @@ static const struct regmap_config regmap_config = {
static int sd3078_probe(struct i2c_client *client)
{
int ret;
- struct sd3078 *sd3078;
+ struct regmap *regmap;
+ struct rtc_device *rtc;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- sd3078 = devm_kzalloc(&client->dev, sizeof(*sd3078), GFP_KERNEL);
- if (!sd3078)
- return -ENOMEM;
-
- sd3078->regmap = devm_regmap_init_i2c(client, &regmap_config);
- if (IS_ERR(sd3078->regmap)) {
+ regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(regmap)) {
dev_err(&client->dev, "regmap allocation failed\n");
- return PTR_ERR(sd3078->regmap);
+ return PTR_ERR(regmap);
}
- i2c_set_clientdata(client, sd3078);
+ i2c_set_clientdata(client, regmap);
- sd3078->rtc = devm_rtc_allocate_device(&client->dev);
- if (IS_ERR(sd3078->rtc))
- return PTR_ERR(sd3078->rtc);
+ rtc = devm_rtc_allocate_device(&client->dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
- sd3078->rtc->ops = &sd3078_rtc_ops;
- sd3078->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
- sd3078->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->ops = &sd3078_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = devm_rtc_register_device(sd3078->rtc);
+ ret = devm_rtc_register_device(rtc);
if (ret)
return ret;
- sd3078_enable_reg_write(sd3078);
+ sd3078_enable_reg_write(regmap);
return 0;
}
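
The sd3078 rewrite goes one step further than the other RTC cleanups: with the rtc_device gone, the private struct's only member was the regmap, so the regmap itself becomes the i2c clientdata. A sketch of how an op then recovers it, with example_read_reg() as a hypothetical accessor:

	#include <linux/i2c.h>
	#include <linux/regmap.h>

	static int example_read_reg(struct device *dev, unsigned int reg,
				    unsigned int *val)
	{
		struct i2c_client *client = to_i2c_client(dev);
		struct regmap *regmap = i2c_get_clientdata(client);

		/* the regmap is the clientdata; no wrapper struct needed */
		return regmap_read(regmap, reg, val);
	}
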
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index a0564d443569..1b715db47160 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -1143,11 +1143,11 @@ static int stm32_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = device_init_wakeup(&pdev->dev, true);
+ ret = devm_device_init_wakeup(&pdev->dev);
if (ret)
goto err;
- ret = dev_pm_set_wake_irq(&pdev->dev, rtc->irq_alarm);
+ ret = devm_pm_set_wake_irq(&pdev->dev, rtc->irq_alarm);
if (ret)
goto err;
@@ -1208,9 +1208,6 @@ err_no_rtc_ck:
if (rtc->data->need_dbp)
regmap_update_bits(rtc->dbp, rtc->dbp_reg, rtc->dbp_mask, 0);
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
-
return ret;
}
@@ -1237,9 +1234,6 @@ static void stm32_rtc_remove(struct platform_device *pdev)
/* Enable backup domain write protection if needed */
if (rtc->data->need_dbp)
regmap_update_bits(rtc->dbp, rtc->dbp_reg, rtc->dbp_mask, 0);
-
- dev_pm_clear_wake_irq(&pdev->dev);
- device_init_wakeup(&pdev->dev, false);
}
static int stm32_rtc_suspend(struct device *dev)
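
The stm32 hunks switch to the devm variants of the wakeup helpers, which is why the manual dev_pm_clear_wake_irq()/device_init_wakeup(dev, false) calls disappear from the error and remove paths. A sketch of the probe-side pairing, with wake_irq standing in for the driver's alarm interrupt:

	#include <linux/pm_wakeup.h>
	#include <linux/pm_wakeirq.h>

	static int example_wakeup_setup(struct device *dev, int wake_irq)
	{
		int ret;

		ret = devm_device_init_wakeup(dev);
		if (ret)
			return ret;

		/* both are undone automatically on probe failure and unbind */
		return devm_pm_set_wake_irq(dev, wake_irq);
	}
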
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index bbae3d39c7be..77bf0e83ffcc 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -344,7 +344,7 @@ static unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
if (irq == 0)
return 0;
- return irq_linear_revmap(qe_ic->irqhost, irq);
+ return irq_find_mapping(qe_ic->irqhost, irq);
}
/* Return an interrupt vector or 0 if no interrupt is pending. */
@@ -360,7 +360,7 @@ static unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
if (irq == 0)
return 0;
- return irq_linear_revmap(qe_ic->irqhost, irq);
+ return irq_find_mapping(qe_ic->irqhost, irq);
}
static void qe_ic_cascade_low(struct irq_desc *desc)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f40c282d4d63..ed38f6d41f47 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -937,9 +937,9 @@ config SPI_QCOM_QSPI
QSPI(Quad SPI) driver for Qualcomm QSPI controller.
config SPI_QPIC_SNAND
- bool "QPIC SNAND controller"
+ tristate "QPIC SNAND controller"
depends on ARCH_QCOM || COMPILE_TEST
- select MTD
+ depends on MTD
help
QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller.
QPIC controller supports both parallel nand and serial nand.
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index c85997478b81..17fc0b17e756 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -302,7 +302,7 @@ static void amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
{
unsigned int i, spd7_val, alt_spd;
- for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
+ for (i = 0; i < ARRAY_SIZE(amd_spi_freq)-1; i++)
if (speed_hz >= amd_spi_freq[i].speed_hz)
break;
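
The spi-amd fix (and the matching cadence-xspi one further down) is an off-by-one: the frequency table is scanned from the fastest entry down, and without the "- 1" bound a request slower than every supported rate leaves the index one past the end of the array. A generic sketch of the corrected scan, with example_freq_entry standing in for amd_spi_freq's element type:

	#include <linux/types.h>

	struct example_freq_entry {
		u32 speed_hz;
	};

	static unsigned int pick_freq_index(const struct example_freq_entry *table,
					    size_t n, u32 speed_hz)
	{
		unsigned int i;

		/* stop at n - 1: a too-slow request picks the last (slowest)
		 * entry instead of indexing past the end of the table */
		for (i = 0; i < n - 1; i++)
			if (speed_hz >= table[i].speed_hz)
				break;

		return i;
	}
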
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 0d1aa6592484..77de5a07639a 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -1162,7 +1162,8 @@ static void bcm2835_spi_cleanup(struct spi_device *spi)
sizeof(u32),
DMA_TO_DEVICE);
- gpiod_put(bs->cs_gpio);
+ if (!IS_ERR(bs->cs_gpio))
+ gpiod_put(bs->cs_gpio);
spi_set_csgpiod(spi, 0, NULL);
kfree(target);
@@ -1225,7 +1226,12 @@ static int bcm2835_spi_setup(struct spi_device *spi)
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
struct bcm2835_spidev *target = spi_get_ctldata(spi);
struct gpiod_lookup_table *lookup __free(kfree) = NULL;
- int ret;
+ const char *pinctrl_compats[] = {
+ "brcm,bcm2835-gpio",
+ "brcm,bcm2711-gpio",
+ "brcm,bcm7211-gpio",
+ };
+ int ret, i;
u32 cs;
if (!target) {
@@ -1290,6 +1296,14 @@ static int bcm2835_spi_setup(struct spi_device *spi)
goto err_cleanup;
}
+ for (i = 0; i < ARRAY_SIZE(pinctrl_compats); i++) {
+ if (of_find_compatible_node(NULL, NULL, pinctrl_compats[i]))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(pinctrl_compats))
+ return 0;
+
/*
* TODO: The code below is a slightly better alternative to the utter
* abuse of the GPIO API that I found here before. It creates a
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 559fbdfbd9f7..c90462783b3f 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -2073,7 +2073,7 @@ static const struct cqspi_driver_platdata k2g_qspi = {
static const struct cqspi_driver_platdata am654_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD,
- .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NEEDS_WR_DELAY,
+ .quirks = CQSPI_NEEDS_WR_DELAY,
};
static const struct cqspi_driver_platdata intel_lgm_qspi = {
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index aed98ab14334..6dcba0e0ddaa 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -432,7 +432,7 @@ static bool cdns_mrvl_xspi_setup_clock(struct cdns_xspi_dev *cdns_xspi,
u32 clk_reg;
bool update_clk = false;
- while (i < ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list)) {
+ while (i < (ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list) - 1)) {
clk_val = MRVL_XSPI_CLOCK_DIVIDED(
cdns_mrvl_xspi_clk_div_list[i]);
if (clk_val <= requested_clk)
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 355e6a39fb41..5c59fddb32c1 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -844,6 +844,19 @@ static const struct spi_controller_mem_caps fsl_qspi_mem_caps = {
.per_op_freq = true,
};
+static void fsl_qspi_cleanup(void *data)
+{
+ struct fsl_qspi *q = data;
+
+ /* disable the hardware */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
+
+ fsl_qspi_clk_disable_unprep(q);
+
+ mutex_destroy(&q->lock);
+}
+
static int fsl_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
@@ -934,6 +947,10 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = np;
+ ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q);
+ if (ret)
+ goto err_destroy_mutex;
+
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
goto err_destroy_mutex;
@@ -953,19 +970,6 @@ err_put_ctrl:
return ret;
}
-static void fsl_qspi_remove(struct platform_device *pdev)
-{
- struct fsl_qspi *q = platform_get_drvdata(pdev);
-
- /* disable the hardware */
- qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
- qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
-
- fsl_qspi_clk_disable_unprep(q);
-
- mutex_destroy(&q->lock);
-}
-
static int fsl_qspi_suspend(struct device *dev)
{
return 0;
@@ -1003,7 +1007,6 @@ static struct platform_driver fsl_qspi_driver = {
.pm = &fsl_qspi_pm_ops,
},
.probe = fsl_qspi_probe,
- .remove = fsl_qspi_remove,
};
module_platform_driver(fsl_qspi_driver);
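
The fsl-qspi change retires the .remove callback by registering the same hardware quiesce as a devm action before the controller is registered, so it also runs when a later probe step fails. A sketch under hypothetical names (example_priv, example_hw_shutdown):

	#include <linux/device.h>
	#include <linux/io.h>
	#include <linux/spi/spi.h>

	struct example_priv {
		void __iomem *iobase;	/* hypothetical controller registers */
	};

	static void example_hw_shutdown(void *data)
	{
		struct example_priv *priv = data;

		writel(0, priv->iobase);	/* quiesce the controller */
	}

	static int example_register(struct device *dev, struct example_priv *priv,
				    struct spi_controller *ctlr)
	{
		int ret;

		/* registered before the controller, so it runs after the
		 * controller has been unregistered on teardown */
		ret = devm_add_action_or_reset(dev, example_hw_shutdown, priv);
		if (ret)
			return ret;

		return devm_spi_register_controller(dev, ctlr);
	}
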
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
index fbba7741a9bf..17eb67e19132 100644
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -1614,7 +1614,7 @@ static const struct of_device_id qcom_snandc_of_match[] = {
.data = &ipq9574_snandc_props,
},
{}
-}
+};
MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
static struct platform_driver qcom_spi_driver = {
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 1bc012fce7cb..1a6381de6f33 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -547,7 +547,7 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
if (spi->mode & SPI_LSB_FIRST)
cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
- if (spi->mode & SPI_CS_HIGH)
+ if ((spi->mode & SPI_CS_HIGH) && !(spi_get_csgpiod(spi, 0)))
cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;
if (xfer->rx_buf && xfer->tx_buf)
diff --git a/drivers/staging/gpib/Kconfig b/drivers/staging/gpib/Kconfig
index 81510db3072e..aa01538d5beb 100644
--- a/drivers/staging/gpib/Kconfig
+++ b/drivers/staging/gpib/Kconfig
@@ -50,7 +50,6 @@ config GPIB_CEC_PCI
tristate "CEC PCI board"
depends on PCI
depends on HAS_IOPORT
- depends on !X86_PAE
select GPIB_COMMON
select GPIB_NEC7210
help
@@ -64,7 +63,6 @@ config GPIB_NI_PCI_ISA
tristate "NI PCI/ISA compatible boards"
depends on ISA_BUS || PCI || PCMCIA
depends on HAS_IOPORT
- depends on !X86_PAE
depends on PCMCIA || !PCMCIA
depends on HAS_IOPORT_MAP
select GPIB_COMMON
@@ -90,7 +88,6 @@ config GPIB_CB7210
tristate "Measurement Computing compatible boards"
depends on HAS_IOPORT
depends on ISA_BUS || PCI || PCMCIA
- depends on !X86_PAE
depends on PCMCIA || !PCMCIA
select GPIB_COMMON
select GPIB_NEC7210
@@ -169,7 +166,6 @@ config GPIB_HP82341
tristate "HP82341x"
select GPIB_COMMON
select GPIB_TMS9914
- depends on BROKEN
depends on ISA_BUS || EISA
help
GPIB driver for HP82341 A/B/C/D boards
@@ -182,7 +178,6 @@ config GPIB_INES
depends on PCI || ISA_BUS || PCMCIA
depends on PCMCIA || !PCMCIA
depends on HAS_IOPORT
- depends on !X86_PAE
select GPIB_COMMON
select GPIB_NEC7210
help
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
index 3f4f95b7fe34..445b9380ff98 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
@@ -4,6 +4,10 @@
* copyright : (C) 2002, 2004 by Frank Mori Hess *
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "agilent_82350b.h"
#include <linux/delay.h>
#include <linux/ioport.h>
@@ -20,8 +24,14 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for Agilent 82350b");
-int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read)
+static int read_transfer_counter(struct agilent_82350b_priv *a_priv);
+static unsigned short read_and_clear_event_status(struct gpib_board *board);
+static void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count);
+static int agilent_82350b_write(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int send_eoi, size_t *bytes_written);
+
+static int agilent_82350b_accel_read(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int *end, size_t *bytes_read)
{
struct agilent_82350b_priv *a_priv = board->private_data;
@@ -48,9 +58,6 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
retval = tms9914_read(board, tms_priv, buffer, 1, end, &num_bytes);
*bytes_read += num_bytes;
- if (retval < 0)
- dev_err(board->gpib_dev, "%s: tms9914_read failed retval=%i\n",
- driver_name, retval);
if (retval < 0 || *end)
return retval;
++buffer;
@@ -66,10 +73,7 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
int j;
int count;
- if (num_fifo_bytes - i < agilent_82350b_fifo_size)
- block_size = num_fifo_bytes - i;
- else
- block_size = agilent_82350b_fifo_size;
+ block_size = min(num_fifo_bytes - i, agilent_82350b_fifo_size);
set_transfer_counter(a_priv, block_size);
writeb(ENABLE_TI_TO_SRAM | DIRECTION_GPIB_TO_HOST,
a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
@@ -86,7 +90,6 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
test_bit(DEV_CLEAR_BN, &tms_priv->state) ||
test_bit(TIMO_NUM, &board->status));
if (retval) {
- dev_dbg(board->gpib_dev, "%s: read wait interrupted\n", driver_name);
retval = -ERESTARTSYS;
break;
}
@@ -100,13 +103,10 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
*end = 1;
}
if (test_bit(TIMO_NUM, &board->status)) {
- dev_err(board->gpib_dev, "%s: read timed out\n", driver_name);
retval = -ETIMEDOUT;
break;
}
if (test_bit(DEV_CLEAR_BN, &tms_priv->state)) {
- dev_err(board->gpib_dev, "%s: device clear interrupted read\n",
- driver_name);
retval = -EINTR;
break;
}
@@ -130,30 +130,24 @@ int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t lengt
return 0;
}
-static int translate_wait_return_value(gpib_board_t *board, int retval)
+static int translate_wait_return_value(struct gpib_board *board, int retval)
{
struct agilent_82350b_priv *a_priv = board->private_data;
struct tms9914_priv *tms_priv = &a_priv->tms9914_priv;
- if (retval) {
- dev_err(board->gpib_dev, "%s: write wait interrupted\n", driver_name);
+ if (retval)
return -ERESTARTSYS;
- }
- if (test_bit(TIMO_NUM, &board->status)) {
- dev_err(board->gpib_dev, "%s: write timed out\n", driver_name);
+ if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
- }
- if (test_bit(DEV_CLEAR_BN, &tms_priv->state)) {
- dev_err(board->gpib_dev, "%s: device clear interrupted write\n", driver_name);
+ if (test_bit(DEV_CLEAR_BN, &tms_priv->state))
return -EINTR;
- }
return 0;
}
-int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
-
+static int agilent_82350b_accel_write(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct agilent_82350b_priv *a_priv = board->private_data;
struct tms9914_priv *tms_priv = &a_priv->tms9914_priv;
@@ -174,10 +168,8 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
event_status = read_and_clear_event_status(board);
- //pr_info("ag_ac_wr: event status 0x%x tms state 0x%lx\n", event_status, tms_priv->state);
-
#ifdef EXPERIMENTAL
- pr_info("ag_ac_wr: wait for previous BO to complete if any\n");
+ // wait for previous BO to complete if any
retval = wait_event_interruptible(board->wait,
test_bit(DEV_CLEAR_BN, &tms_priv->state) ||
test_bit(WRITE_READY_BN, &tms_priv->state) ||
@@ -188,22 +180,16 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
return retval;
#endif
- //pr_info("ag_ac_wr: sending first byte\n");
retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
*bytes_written += num_bytes;
if (retval < 0)
return retval;
- //pr_info("ag_ac_wr: %ld bytes eoi %d tms state 0x%lx\n",length, send_eoi, tms_priv->state);
-
write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BOIE, IMR0);
for (i = 1; i < fifotransferlength;) {
clear_bit(WRITE_READY_BN, &tms_priv->state);
- if (fifotransferlength - i < agilent_82350b_fifo_size)
- block_size = fifotransferlength - i;
- else
- block_size = agilent_82350b_fifo_size;
+ block_size = min(fifotransferlength - i, agilent_82350b_fifo_size);
set_transfer_counter(a_priv, block_size);
for (j = 0; j < block_size; ++j, ++i) {
// load data into board's sram
@@ -211,13 +197,8 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
}
writeb(ENABLE_TI_TO_SRAM, a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
- //pr_info("ag_ac_wr: send block: %d bytes tms 0x%lx\n", block_size,
- // tms_priv->state);
-
- if (agilent_82350b_fifo_is_halted(a_priv)) {
+ if (agilent_82350b_fifo_is_halted(a_priv))
writeb(RESTART_STREAM_BIT, a_priv->gpib_base + STREAM_STATUS_REG);
- // pr_info("ag_ac_wr: needed restart\n");
- }
retval = wait_event_interruptible(board->wait,
((event_status =
@@ -227,7 +208,6 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
test_bit(TIMO_NUM, &board->status));
writeb(0, a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
num_bytes = block_size - read_transfer_counter(a_priv);
- //pr_info("ag_ac_wr: sent %ld bytes tms 0x%lx\n", num_bytes, tms_priv->state);
*bytes_written += num_bytes;
retval = translate_wait_return_value(board, retval);
@@ -239,9 +219,6 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
return retval;
if (send_eoi) {
- //pr_info("ag_ac_wr: sending last byte with eoi byte no: %d\n",
- // fifotransferlength+1);
-
retval = agilent_82350b_write(board, buffer + fifotransferlength, 1, send_eoi,
&num_bytes);
*bytes_written += num_bytes;
@@ -251,8 +228,7 @@ int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t leng
return 0;
}
-unsigned short read_and_clear_event_status(gpib_board_t *board)
-
+static unsigned short read_and_clear_event_status(struct gpib_board *board)
{
struct agilent_82350b_priv *a_priv = board->private_data;
unsigned long flags;
@@ -265,12 +241,12 @@ unsigned short read_and_clear_event_status(gpib_board_t *board)
return status;
}
-irqreturn_t agilent_82350b_interrupt(int irq, void *arg)
+static irqreturn_t agilent_82350b_interrupt(int irq, void *arg)
{
int tms9914_status1 = 0, tms9914_status2 = 0;
int event_status;
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct agilent_82350b_priv *a_priv = board->private_data;
unsigned long flags;
irqreturn_t retval = IRQ_NONE;
@@ -286,7 +262,6 @@ irqreturn_t agilent_82350b_interrupt(int irq, void *arg)
tms9914_interrupt_have_status(board, &a_priv->tms9914_priv, tms9914_status1,
tms9914_status2);
}
-//pr_info("event_status=0x%x s1 %x s2 %x\n", event_status,tms9914_status1,tms9914_status2);
//write-clear status bits
if (event_status & (BUFFER_END_STATUS_BIT | TERM_COUNT_STATUS_BIT)) {
writeb(event_status & (BUFFER_END_STATUS_BIT | TERM_COUNT_STATUS_BIT),
@@ -298,12 +273,9 @@ irqreturn_t agilent_82350b_interrupt(int irq, void *arg)
return retval;
}
-void agilent_82350b_detach(gpib_board_t *board);
-
-const char *driver_name = "agilent_82350b";
-
-int read_transfer_counter(struct agilent_82350b_priv *a_priv)
+static void agilent_82350b_detach(struct gpib_board *board);
+static int read_transfer_counter(struct agilent_82350b_priv *a_priv)
{
int lo, mid, value;
@@ -314,8 +286,7 @@ int read_transfer_counter(struct agilent_82350b_priv *a_priv)
return value;
}
-void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count)
-
+static void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count)
{
int complement = -count;
@@ -326,17 +297,16 @@ void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count)
}
// wrappers for interface functions
-int agilent_82350b_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read)
-
+static int agilent_82350b_read(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int *end, size_t *bytes_read)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_read(board, &priv->tms9914_priv, buffer, length, end, bytes_read);
}
-int agilent_82350b_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int agilent_82350b_write(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int send_eoi, size_t *bytes_written)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -344,8 +314,8 @@ int agilent_82350b_write(gpib_board_t *board, uint8_t *buffer, size_t length, in
return tms9914_write(board, &priv->tms9914_priv, buffer, length, send_eoi, bytes_written);
}
-int agilent_82350b_command(gpib_board_t *board, uint8_t *buffer, size_t length,
- size_t *bytes_written)
+static int agilent_82350b_command(struct gpib_board *board, uint8_t *buffer,
+ size_t length, size_t *bytes_written)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -353,7 +323,7 @@ int agilent_82350b_command(gpib_board_t *board, uint8_t *buffer, size_t length,
return tms9914_command(board, &priv->tms9914_priv, buffer, length, bytes_written);
}
-int agilent_82350b_take_control(gpib_board_t *board, int synchronous)
+static int agilent_82350b_take_control(struct gpib_board *board, int synchronous)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -361,7 +331,7 @@ int agilent_82350b_take_control(gpib_board_t *board, int synchronous)
return tms9914_take_control_workaround(board, &priv->tms9914_priv, synchronous);
}
-int agilent_82350b_go_to_standby(gpib_board_t *board)
+static int agilent_82350b_go_to_standby(struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -369,7 +339,8 @@ int agilent_82350b_go_to_standby(gpib_board_t *board)
return tms9914_go_to_standby(board, &priv->tms9914_priv);
}
-void agilent_82350b_request_system_control(gpib_board_t *board, int request_control)
+static void agilent_82350b_request_system_control(struct gpib_board *board,
+ int request_control)
{
struct agilent_82350b_priv *a_priv = board->private_data;
@@ -387,7 +358,7 @@ void agilent_82350b_request_system_control(gpib_board_t *board, int request_cont
tms9914_request_system_control(board, &a_priv->tms9914_priv, request_control);
}
-void agilent_82350b_interface_clear(gpib_board_t *board, int assert)
+static void agilent_82350b_interface_clear(struct gpib_board *board, int assert)
{
struct agilent_82350b_priv *priv = board->private_data;
@@ -395,104 +366,96 @@ void agilent_82350b_interface_clear(gpib_board_t *board, int assert)
tms9914_interface_clear(board, &priv->tms9914_priv, assert);
}
-void agilent_82350b_remote_enable(gpib_board_t *board, int enable)
-
+static void agilent_82350b_remote_enable(struct gpib_board *board, int enable)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_remote_enable(board, &priv->tms9914_priv, enable);
}
-int agilent_82350b_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
-
+static int agilent_82350b_enable_eos(struct gpib_board *board, uint8_t eos_byte,
+ int compare_8_bits)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_enable_eos(board, &priv->tms9914_priv, eos_byte, compare_8_bits);
}
-void agilent_82350b_disable_eos(gpib_board_t *board)
-
+static void agilent_82350b_disable_eos(struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_disable_eos(board, &priv->tms9914_priv);
}
-unsigned int agilent_82350b_update_status(gpib_board_t *board, unsigned int clear_mask)
-
+static unsigned int agilent_82350b_update_status(struct gpib_board *board,
+ unsigned int clear_mask)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_update_status(board, &priv->tms9914_priv, clear_mask);
}
-int agilent_82350b_primary_address(gpib_board_t *board, unsigned int address)
-
+static int agilent_82350b_primary_address(struct gpib_board *board,
+ unsigned int address)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_primary_address(board, &priv->tms9914_priv, address);
}
-int agilent_82350b_secondary_address(gpib_board_t *board, unsigned int address, int enable)
-
+static int agilent_82350b_secondary_address(struct gpib_board *board,
+ unsigned int address, int enable)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_secondary_address(board, &priv->tms9914_priv, address, enable);
}
-int agilent_82350b_parallel_poll(gpib_board_t *board, uint8_t *result)
-
+static int agilent_82350b_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_parallel_poll(board, &priv->tms9914_priv, result);
}
-void agilent_82350b_parallel_poll_configure(gpib_board_t *board, uint8_t config)
-
+static void agilent_82350b_parallel_poll_configure(struct gpib_board *board,
+ uint8_t config)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_parallel_poll_configure(board, &priv->tms9914_priv, config);
}
-void agilent_82350b_parallel_poll_response(gpib_board_t *board, int ist)
-
+static void agilent_82350b_parallel_poll_response(struct gpib_board *board, int ist)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_parallel_poll_response(board, &priv->tms9914_priv, ist);
}
-void agilent_82350b_serial_poll_response(gpib_board_t *board, uint8_t status)
-
+static void agilent_82350b_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_serial_poll_response(board, &priv->tms9914_priv, status);
}
-uint8_t agilent_82350b_serial_poll_status(gpib_board_t *board)
-
+static uint8_t agilent_82350b_serial_poll_status(struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_serial_poll_status(board, &priv->tms9914_priv);
}
-int agilent_82350b_line_status(const gpib_board_t *board)
-
+static int agilent_82350b_line_status(const struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
return tms9914_line_status(board, &priv->tms9914_priv);
}
-unsigned int agilent_82350b_t1_delay(gpib_board_t *board, unsigned int nanosec)
-
+static int agilent_82350b_t1_delay(struct gpib_board *board, unsigned int nanosec)
{
struct agilent_82350b_priv *a_priv = board->private_data;
static const int nanosec_per_clock = 30;
@@ -507,16 +470,14 @@ unsigned int agilent_82350b_t1_delay(gpib_board_t *board, unsigned int nanosec)
return value * nanosec_per_clock;
}
-void agilent_82350b_return_to_local(gpib_board_t *board)
-
+static void agilent_82350b_return_to_local(struct gpib_board *board)
{
struct agilent_82350b_priv *priv = board->private_data;
tms9914_return_to_local(board, &priv->tms9914_priv);
}
-int agilent_82350b_allocate_private(gpib_board_t *board)
-
+static int agilent_82350b_allocate_private(struct gpib_board *board)
{
board->private_data = kzalloc(sizeof(struct agilent_82350b_priv), GFP_KERNEL);
if (!board->private_data)
@@ -524,15 +485,14 @@ int agilent_82350b_allocate_private(gpib_board_t *board)
return 0;
}
-void agilent_82350b_free_private(gpib_board_t *board)
-
+static void agilent_82350b_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-static int init_82350a_hardware(gpib_board_t *board, const gpib_board_config_t *config)
-
+static int init_82350a_hardware(struct gpib_board *board,
+ const gpib_board_config_t *config)
{
struct agilent_82350b_priv *a_priv = board->private_data;
static const unsigned int firmware_length = 5302;
@@ -557,11 +517,10 @@ static int init_82350a_hardware(gpib_board_t *board, const gpib_board_config_t *
return 0;
// need to programme borg
if (!config->init_data || config->init_data_length != firmware_length) {
- dev_err(board->gpib_dev, "%s: the 82350A board requires firmware after powering on.\n",
- driver_name);
+ dev_err(board->gpib_dev, "the 82350A board requires firmware after powering on.\n");
return -EIO;
}
- dev_info(board->gpib_dev, "%s: Loading firmware...\n", driver_name);
+ dev_dbg(board->gpib_dev, "Loading firmware...\n");
// tickle the borg
writel(plx_cntrl_static_bits | PLX9050_USER3_DATA_BIT,
@@ -580,7 +539,7 @@ static int init_82350a_hardware(gpib_board_t *board, const gpib_board_config_t *
usleep_range(10, 20);
}
if (j == timeout) {
- dev_err(board->gpib_dev, "%s: timed out loading firmware.\n", driver_name);
+ dev_err(board->gpib_dev, "timed out loading firmware.\n");
return -ETIMEDOUT;
}
writeb(firmware_data[i], a_priv->gpib_base + CONFIG_DATA_REG);
@@ -591,15 +550,14 @@ static int init_82350a_hardware(gpib_board_t *board, const gpib_board_config_t *
usleep_range(10, 20);
}
if (j == timeout) {
- dev_err(board->gpib_dev, "%s: timed out waiting for firmware load to complete.\n",
- driver_name);
+ dev_err(board->gpib_dev, "timed out waiting for firmware load to complete.\n");
return -ETIMEDOUT;
}
- dev_info(board->gpib_dev, "%s: ...done.\n", driver_name);
+ dev_dbg(board->gpib_dev, " ...done.\n");
return 0;
}
-static int test_sram(gpib_board_t *board)
+static int test_sram(struct gpib_board *board)
{
struct agilent_82350b_priv *a_priv = board->private_data;
@@ -617,19 +575,19 @@ static int test_sram(gpib_board_t *board)
unsigned int read_value = readb(a_priv->sram_base + i);
if ((i & byte_mask) != read_value) {
- dev_err(board->gpib_dev, "%s: SRAM test failed at %d wanted %d got %d\n",
- driver_name, i, (i & byte_mask), read_value);
+ dev_err(board->gpib_dev, "SRAM test failed at %d wanted %d got %d\n",
+ i, (i & byte_mask), read_value);
return -EIO;
}
if (need_resched())
schedule();
}
- dev_info(board->gpib_dev, "%s: SRAM test passed 0x%x bytes checked\n",
- driver_name, sram_length);
+ dev_dbg(board->gpib_dev, "SRAM test passed 0x%x bytes checked\n", sram_length);
return 0;
}
-static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_config_t *config,
+static int agilent_82350b_generic_attach(struct gpib_board *board,
+ const gpib_board_config_t *config,
int use_fifos)
{
@@ -653,14 +611,14 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
PCI_DEVICE_ID_82350B, NULL);
if (a_priv->pci_device) {
a_priv->model = MODEL_82350B;
- dev_info(board->gpib_dev, "%s: Agilent 82350B board found\n", driver_name);
+ dev_dbg(board->gpib_dev, "Agilent 82350B board found\n");
} else {
a_priv->pci_device = gpib_pci_get_device(config, PCI_VENDOR_ID_AGILENT,
PCI_DEVICE_ID_82351A, NULL);
if (a_priv->pci_device) {
a_priv->model = MODEL_82351A;
- dev_info(board->gpib_dev, "%s: Agilent 82351B board found\n", driver_name);
+ dev_dbg(board->gpib_dev, "Agilent 82351B board found\n");
} else {
a_priv->pci_device = gpib_pci_get_subsys(config, PCI_VENDOR_ID_PLX,
@@ -670,46 +628,40 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
a_priv->pci_device);
if (a_priv->pci_device) {
a_priv->model = MODEL_82350A;
- dev_info(board->gpib_dev, "%s: HP/Agilent 82350A board found\n",
- driver_name);
+ dev_dbg(board->gpib_dev, "HP/Agilent 82350A board found\n");
} else {
- dev_err(board->gpib_dev, "%s: no 82350/82351 board found\n",
- driver_name);
+ dev_err(board->gpib_dev, "no 82350/82351 board found\n");
return -ENODEV;
}
}
}
if (pci_enable_device(a_priv->pci_device)) {
- dev_err(board->gpib_dev, "%s: error enabling pci device\n", driver_name);
+ dev_err(board->gpib_dev, "error enabling pci device\n");
return -EIO;
}
- if (pci_request_regions(a_priv->pci_device, driver_name))
- return -EIO;
+ if (pci_request_regions(a_priv->pci_device, DRV_NAME))
+ return -ENOMEM;
switch (a_priv->model) {
case MODEL_82350A:
a_priv->plx_base = ioremap(pci_resource_start(a_priv->pci_device, PLX_MEM_REGION),
pci_resource_len(a_priv->pci_device, PLX_MEM_REGION));
- dev_dbg(board->gpib_dev, "%s: plx base address remapped to 0x%p\n",
- driver_name, a_priv->plx_base);
+ dev_dbg(board->gpib_dev, "plx base address remapped to 0x%p\n", a_priv->plx_base);
a_priv->gpib_base = ioremap(pci_resource_start(a_priv->pci_device,
GPIB_82350A_REGION),
pci_resource_len(a_priv->pci_device,
GPIB_82350A_REGION));
- dev_dbg(board->gpib_dev, "%s: gpib base address remapped to 0x%p\n",
- driver_name, a_priv->gpib_base);
+ dev_dbg(board->gpib_dev, "chip base address remapped to 0x%p\n", a_priv->gpib_base);
tms_priv->mmiobase = a_priv->gpib_base + TMS9914_BASE_REG;
a_priv->sram_base = ioremap(pci_resource_start(a_priv->pci_device,
SRAM_82350A_REGION),
pci_resource_len(a_priv->pci_device,
SRAM_82350A_REGION));
- dev_dbg(board->gpib_dev, "%s: sram base address remapped to 0x%p\n",
- driver_name, a_priv->sram_base);
+ dev_dbg(board->gpib_dev, "sram base address remapped to 0x%p\n", a_priv->sram_base);
a_priv->borg_base = ioremap(pci_resource_start(a_priv->pci_device,
BORG_82350A_REGION),
pci_resource_len(a_priv->pci_device,
BORG_82350A_REGION));
- dev_dbg(board->gpib_dev, "%s: borg base address remapped to 0x%p\n",
- driver_name, a_priv->borg_base);
+ dev_dbg(board->gpib_dev, "borg base address remapped to 0x%p\n", a_priv->borg_base);
retval = init_82350a_hardware(board, config);
if (retval < 0)
@@ -719,21 +671,18 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
case MODEL_82351A:
a_priv->gpib_base = ioremap(pci_resource_start(a_priv->pci_device, GPIB_REGION),
pci_resource_len(a_priv->pci_device, GPIB_REGION));
- dev_dbg(board->gpib_dev, "%s: gpib base address remapped to 0x%p\n",
- driver_name, a_priv->gpib_base);
+ dev_dbg(board->gpib_dev, "chip base address remapped to 0x%p\n", a_priv->gpib_base);
tms_priv->mmiobase = a_priv->gpib_base + TMS9914_BASE_REG;
a_priv->sram_base = ioremap(pci_resource_start(a_priv->pci_device, SRAM_REGION),
pci_resource_len(a_priv->pci_device, SRAM_REGION));
- dev_dbg(board->gpib_dev, "%s: sram base address remapped to 0x%p\n",
- driver_name, a_priv->sram_base);
+ dev_dbg(board->gpib_dev, "sram base address remapped to 0x%p\n", a_priv->sram_base);
a_priv->misc_base = ioremap(pci_resource_start(a_priv->pci_device, MISC_REGION),
pci_resource_len(a_priv->pci_device, MISC_REGION));
- dev_dbg(board->gpib_dev, "%s: misc base address remapped to 0x%p\n",
- driver_name, a_priv->misc_base);
+ dev_dbg(board->gpib_dev, "misc base address remapped to 0x%p\n", a_priv->misc_base);
break;
default:
- pr_err("%s: invalid board\n", driver_name);
- return -1;
+ dev_err(board->gpib_dev, "invalid board\n");
+ return -ENODEV;
}
retval = test_sram(board);
@@ -741,12 +690,12 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
return retval;
if (request_irq(a_priv->pci_device->irq, agilent_82350b_interrupt,
- IRQF_SHARED, driver_name, board)) {
- pr_err("%s: can't request IRQ %d\n", driver_name, a_priv->pci_device->irq);
+ IRQF_SHARED, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to obtain irq %d\n", a_priv->pci_device->irq);
return -EIO;
}
a_priv->irq = a_priv->pci_device->irq;
- dev_dbg(board->gpib_dev, "%s: IRQ %d\n", driver_name, a_priv->irq);
+ dev_dbg(board->gpib_dev, " IRQ %d\n", a_priv->irq);
writeb(0, a_priv->gpib_base + SRAM_ACCESS_CONTROL_REG);
a_priv->card_mode_bits = ENABLE_PCI_IRQ_BIT;
@@ -780,20 +729,19 @@ static int agilent_82350b_generic_attach(gpib_board_t *board, const gpib_board_c
return 0;
}
-int agilent_82350b_unaccel_attach(gpib_board_t *board, const gpib_board_config_t *config)
-
+static int agilent_82350b_unaccel_attach(struct gpib_board *board,
+ const gpib_board_config_t *config)
{
return agilent_82350b_generic_attach(board, config, 0);
}
-int agilent_82350b_accel_attach(gpib_board_t *board, const gpib_board_config_t *config)
-
+static int agilent_82350b_accel_attach(struct gpib_board *board,
+ const gpib_board_config_t *config)
{
return agilent_82350b_generic_attach(board, config, 1);
}
-void agilent_82350b_detach(gpib_board_t *board)
-
+static void agilent_82350b_detach(struct gpib_board *board)
{
struct agilent_82350b_priv *a_priv = board->private_data;
struct tms9914_priv *tms_priv;
@@ -848,6 +796,7 @@ static gpib_interface_t agilent_82350b_unaccel_interface = {
.primary_address = agilent_82350b_primary_address,
.secondary_address = agilent_82350b_secondary_address,
.serial_poll_response = agilent_82350b_serial_poll_response,
+ .serial_poll_status = agilent_82350b_serial_poll_status,
.t1_delay = agilent_82350b_t1_delay,
.return_to_local = agilent_82350b_return_to_local,
};
@@ -875,6 +824,7 @@ static gpib_interface_t agilent_82350b_interface = {
.primary_address = agilent_82350b_primary_address,
.secondary_address = agilent_82350b_secondary_address,
.serial_poll_response = agilent_82350b_serial_poll_response,
+ .serial_poll_status = agilent_82350b_serial_poll_status,
.t1_delay = agilent_82350b_t1_delay,
.return_to_local = agilent_82350b_return_to_local,
};
@@ -895,31 +845,30 @@ static const struct pci_device_id agilent_82350b_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agilent_82350b_pci_table);
static struct pci_driver agilent_82350b_pci_driver = {
- .name = "agilent_82350b",
+ .name = DRV_NAME,
.id_table = agilent_82350b_pci_table,
.probe = &agilent_82350b_pci_probe
};
static int __init agilent_82350b_init_module(void)
-
{
int result;
result = pci_register_driver(&agilent_82350b_pci_driver);
if (result) {
- pr_err("agilent_82350b: pci_register_driver failed: error = %d\n", result);
+ pr_err("pci_register_driver failed: error = %d\n", result);
return result;
}
result = gpib_register_driver(&agilent_82350b_unaccel_interface, THIS_MODULE);
if (result) {
- pr_err("agilent_82350b: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_unaccel;
}
result = gpib_register_driver(&agilent_82350b_interface, THIS_MODULE);
if (result) {
- pr_err("agilent_82350b: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_interface;
}
@@ -934,7 +883,6 @@ err_unaccel:
}
static void __exit agilent_82350b_exit_module(void)
-
{
gpib_unregister_driver(&agilent_82350b_interface);
gpib_unregister_driver(&agilent_82350b_unaccel_interface);
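
The agilent_82350b rework drops the exported driver_name string in favour of compile-time prefixes: pr_fmt()/dev_fmt make every log line self-identifying, and DRV_NAME feeds pci_request_regions() and request_irq(). A condensed sketch of the pattern, with example_request() as a hypothetical caller:

	/* must be defined before any header that pulls in printk.h */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
	#define dev_fmt pr_fmt
	#define DRV_NAME KBUILD_MODNAME

	#include <linux/module.h>
	#include <linux/pci.h>

	static int example_request(struct pci_dev *pdev)
	{
		if (pci_request_regions(pdev, DRV_NAME))
			return -ENOMEM;

		pr_info("regions claimed\n");	/* logs "<module>: regions claimed" */
		return 0;
	}
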
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h b/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
index 32b322113c10..1573230c619d 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
+++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
@@ -57,56 +57,6 @@ struct agilent_82350b_priv {
bool using_fifos;
};
-// driver name
-extern const char *driver_name;
-
-// init functions
-
-int agilent_82350b_unaccel_attach(gpib_board_t *board, const gpib_board_config_t *config);
-int agilent_82350b_accel_attach(gpib_board_t *board, const gpib_board_config_t *config);
-
-// interface functions
-int agilent_82350b_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read);
-int agilent_82350b_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written);
-int agilent_82350b_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read);
-int agilent_82350b_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written);
-int agilent_82350b_command(gpib_board_t *board, uint8_t *buffer, size_t length,
- size_t *bytes_written);
-int agilent_82350b_take_control(gpib_board_t *board, int synchronous);
-int agilent_82350b_go_to_standby(gpib_board_t *board);
-void agilent_82350b_request_system_control(gpib_board_t *board, int request_control);
-void agilent_82350b_interface_clear(gpib_board_t *board, int assert);
-void agilent_82350b_remote_enable(gpib_board_t *board, int enable);
-int agilent_82350b_enable_eos(gpib_board_t *board, uint8_t eos_byte, int
- compare_8_bits);
-void agilent_82350b_disable_eos(gpib_board_t *board);
-unsigned int agilent_82350b_update_status(gpib_board_t *board, unsigned int clear_mask);
-int agilent_82350b_primary_address(gpib_board_t *board, unsigned int address);
-int agilent_82350b_secondary_address(gpib_board_t *board, unsigned int address, int
- enable);
-int agilent_82350b_parallel_poll(gpib_board_t *board, uint8_t *result);
-void agilent_82350b_parallel_poll_configure(gpib_board_t *board, uint8_t config);
-void agilent_82350b_parallel_poll_response(gpib_board_t *board, int ist);
-void agilent_82350b_serial_poll_response(gpib_board_t *board, uint8_t status);
-void agilent_82350b_return_to_local(gpib_board_t *board);
-uint8_t agilent_82350b_serial_poll_status(gpib_board_t *board);
-int agilent_82350b_line_status(const gpib_board_t *board);
-unsigned int agilent_82350b_t1_delay(gpib_board_t *board, unsigned int nanosec);
-
-// interrupt service routines
-irqreturn_t agilent_82350b_interrupt(int irq, void *arg);
-
-// utility functions
-int agilent_82350b_allocate_private(gpib_board_t *board);
-void agilent_82350b_free_private(gpib_board_t *board);
-unsigned short read_and_clear_event_status(gpib_board_t *board);
-int read_transfer_counter(struct agilent_82350b_priv *a_priv);
-void set_transfer_counter(struct agilent_82350b_priv *a_priv, int count);
-
//registers
enum agilent_82350b_gpib_registers
diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
index 69f0e490d401..67bf125645c0 100644
--- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
+++ b/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
@@ -7,6 +7,10 @@
#define _GNU_SOURCE
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -21,9 +25,10 @@ MODULE_DESCRIPTION("GPIB driver for Agilent 82357A/B usb adapters");
static struct usb_interface *agilent_82357a_driver_interfaces[MAX_NUM_82357A_INTERFACES];
static DEFINE_MUTEX(agilent_82357a_hotplug_lock); // protect board insertion and removal
-static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned int clear_mask);
+static unsigned int agilent_82357a_update_status(struct gpib_board *board,
+ unsigned int clear_mask);
-static int agilent_82357a_take_control_internal(gpib_board_t *board, int synchronous);
+static int agilent_82357a_take_control_internal(struct gpib_board *board, int synchronous);
static void agilent_82357a_bulk_complete(struct urb *urb)
{
@@ -79,14 +84,12 @@ static int agilent_82357a_send_bulk_msg(struct agilent_82357a_priv *a_priv, void
retval = usb_submit_urb(a_priv->bulk_urb, GFP_KERNEL);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit bulk out urb, retval=%i\n", retval);
mutex_unlock(&a_priv->bulk_alloc_lock);
goto cleanup;
}
mutex_unlock(&a_priv->bulk_alloc_lock);
if (down_interruptible(&context->complete)) {
- dev_err(&usb_dev->dev, "%s: interrupted\n", __func__);
retval = -ERESTARTSYS;
goto cleanup;
}
@@ -149,14 +152,12 @@ static int agilent_82357a_receive_bulk_msg(struct agilent_82357a_priv *a_priv, v
retval = usb_submit_urb(a_priv->bulk_urb, GFP_KERNEL);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit bulk in urb, retval=%i\n", retval);
mutex_unlock(&a_priv->bulk_alloc_lock);
goto cleanup;
}
mutex_unlock(&a_priv->bulk_alloc_lock);
if (down_interruptible(&context->complete)) {
- dev_err(&usb_dev->dev, "%s: interrupted\n", __func__);
retval = -ERESTARTSYS;
goto cleanup;
}
@@ -205,7 +206,6 @@ static int agilent_82357a_receive_control_msg(struct agilent_82357a_priv *a_priv
static void agilent_82357a_dump_raw_block(const u8 *raw_data, int length)
{
- pr_info("hex block dump\n");
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 8, 1, raw_data, length, true);
}
@@ -225,7 +225,7 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv,
static const int max_writes = 31;
if (num_writes > max_writes) {
- dev_err(&usb_dev->dev, "%s: bug! num_writes=%i too large\n", __func__, num_writes);
+ dev_err(&usb_dev->dev, "bug! num_writes=%i too large\n", num_writes);
return -EIO;
}
out_data_length = num_writes * bytes_per_write + header_length;
@@ -239,8 +239,7 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv,
out_data[i++] = writes[j].address;
out_data[i++] = writes[j].value;
}
- if (i > out_data_length)
- dev_err(&usb_dev->dev, "%s: bug! buffer overrun\n", __func__);
+
retval = mutex_lock_interruptible(&a_priv->bulk_transfer_lock);
if (retval) {
kfree(out_data);
@@ -249,8 +248,8 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv,
retval = agilent_82357a_send_bulk_msg(a_priv, out_data, i, &bytes_written, 1000);
kfree(out_data);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
mutex_unlock(&a_priv->bulk_transfer_lock);
return retval;
}
@@ -265,20 +264,19 @@ static int agilent_82357a_write_registers(struct agilent_82357a_priv *a_priv,
mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
agilent_82357a_dump_raw_block(in_data, bytes_read);
kfree(in_data);
return -EIO;
}
if (in_data[0] != (0xff & ~DATA_PIPE_CMD_WR_REGS)) {
- dev_err(&usb_dev->dev, "%s: error, bulk command=0x%x != ~DATA_PIPE_CMD_WR_REGS\n",
- __func__, in_data[0]);
+ dev_err(&usb_dev->dev, "bulk command=0x%x != ~DATA_PIPE_CMD_WR_REGS\n", in_data[0]);
return -EIO;
}
if (in_data[1]) {
- dev_err(&usb_dev->dev, "%s: nonzero error code 0x%x in DATA_PIPE_CMD_WR_REGS response\n",
- __func__, in_data[1]);
+ dev_err(&usb_dev->dev, "nonzero error code 0x%x in DATA_PIPE_CMD_WR_REGS response\n",
+ in_data[1]);
return -EIO;
}
kfree(in_data);
@@ -299,9 +297,10 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv,
static const int header_length = 2;
static const int max_reads = 62;
- if (num_reads > max_reads)
- dev_err(&usb_dev->dev, "%s: bug! num_reads=%i too large\n", __func__, num_reads);
-
+ if (num_reads > max_reads) {
+ dev_err(&usb_dev->dev, "bug! num_reads=%i too large\n", num_reads);
+ return -EIO;
+ }
out_data_length = num_reads + header_length;
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
@@ -311,8 +310,7 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv,
out_data[i++] = num_reads;
for (j = 0; j < num_reads; j++)
out_data[i++] = reads[j].address;
- if (i > out_data_length)
- dev_err(&usb_dev->dev, "%s: bug! buffer overrun\n", __func__);
+
if (blocking) {
retval = mutex_lock_interruptible(&a_priv->bulk_transfer_lock);
if (retval) {
@@ -329,8 +327,8 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv,
retval = agilent_82357a_send_bulk_msg(a_priv, out_data, i, &bytes_written, 1000);
kfree(out_data);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
mutex_unlock(&a_priv->bulk_transfer_lock);
return retval;
}
@@ -345,21 +343,20 @@ static int agilent_82357a_read_registers(struct agilent_82357a_priv *a_priv,
mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
agilent_82357a_dump_raw_block(in_data, bytes_read);
kfree(in_data);
return -EIO;
}
i = 0;
if (in_data[i++] != (0xff & ~DATA_PIPE_CMD_RD_REGS)) {
- dev_err(&usb_dev->dev, "%s: error, bulk command=0x%x != ~DATA_PIPE_CMD_RD_REGS\n",
- __func__, in_data[0]);
+ dev_err(&usb_dev->dev, "bulk command=0x%x != ~DATA_PIPE_CMD_RD_REGS\n", in_data[0]);
return -EIO;
}
if (in_data[i++]) {
- dev_err(&usb_dev->dev, "%s: nonzero error code 0x%x in DATA_PIPE_CMD_RD_REGS response\n",
- __func__, in_data[1]);
+ dev_err(&usb_dev->dev, "nonzero error code 0x%x in DATA_PIPE_CMD_RD_REGS response\n",
+ in_data[1]);
return -EIO;
}
for (j = 0; j < num_reads; j++)
@@ -390,14 +387,13 @@ static int agilent_82357a_abort(struct agilent_82357a_priv *a_priv, int flush)
wIndex, status_data,
status_data_len, 100);
if (receive_control_retval < 0) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_control_msg() returned %i\n",
- __func__, receive_control_retval);
+ dev_err(&usb_dev->dev, "82357a_receive_control_msg() returned %i\n",
+ receive_control_retval);
retval = -EIO;
goto cleanup;
}
if (status_data[0] != (~XFER_ABORT & 0xff)) {
- dev_err(&usb_dev->dev, "%s: error, major code=0x%x != ~XFER_ABORT\n",
- __func__, status_data[0]);
+ dev_err(&usb_dev->dev, "major code=0x%x != ~XFER_ABORT\n", status_data[0]);
retval = -EIO;
goto cleanup;
}
@@ -413,8 +409,7 @@ static int agilent_82357a_abort(struct agilent_82357a_priv *a_priv, int flush)
fallthrough;
case UGP_ERR_FLUSHING_ALREADY:
default:
- dev_err(&usb_dev->dev, "%s: abort returned error code=0x%x\n",
- __func__, status_data[1]);
+ dev_err(&usb_dev->dev, "abort returned error code=0x%x\n", status_data[1]);
retval = -EIO;
break;
}
@@ -425,15 +420,15 @@ cleanup:
}
// interface functions
-int agilent_82357a_command(gpib_board_t *board, uint8_t *buffer, size_t length,
+int agilent_82357a_command(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written);
-static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
+static int agilent_82357a_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
size_t *nbytes)
{
int retval;
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
int out_data_length, in_data_length;
int bytes_written, bytes_read;
@@ -444,6 +439,10 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng
*nbytes = 0;
*end = 0;
+
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
out_data_length = 0x9;
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
@@ -469,8 +468,8 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng
retval = agilent_82357a_send_bulk_msg(a_priv, out_data, i, &bytes_written, msec_timeout);
kfree(out_data);
if (retval || bytes_written != i) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval < 0)
return retval;
@@ -501,19 +500,19 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng
&extra_bytes_read, 100);
bytes_read += extra_bytes_read;
if (extra_bytes_retval) {
- dev_err(&usb_dev->dev, "%s: extra_bytes_retval=%i, bytes_read=%i\n",
- __func__, extra_bytes_retval, bytes_read);
+ dev_err(&usb_dev->dev, "extra_bytes_retval=%i, bytes_read=%i\n",
+ extra_bytes_retval, bytes_read);
agilent_82357a_abort(a_priv, 0);
}
} else if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
agilent_82357a_abort(a_priv, 0);
}
mutex_unlock(&a_priv->bulk_transfer_lock);
if (bytes_read > length + 1) {
bytes_read = length + 1;
- pr_warn("%s: bytes_read > length? truncating", __func__);
+ dev_warn(&usb_dev->dev, "bytes_read > length? truncating");
}
if (bytes_read >= 1) {
@@ -535,12 +534,14 @@ static int agilent_82357a_read(gpib_board_t *board, uint8_t *buffer, size_t leng
return retval;
}
-static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_commands, int send_eoi, size_t *bytes_written)
+static ssize_t agilent_82357a_generic_write(struct gpib_board *board,
+ uint8_t *buffer, size_t length,
+ int send_commands, int send_eoi,
+ size_t *bytes_written)
{
int retval;
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data = NULL;
u8 *status_data = NULL;
int out_data_length;
@@ -551,6 +552,10 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
struct agilent_82357a_register_pairlet read_reg;
*bytes_written = 0;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
out_data_length = length + 0x8;
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
@@ -584,8 +589,8 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
kfree(out_data);
if (retval || raw_bytes_written != i) {
agilent_82357a_abort(a_priv, 0);
- dev_err(&usb_dev->dev, "%s: agilent_82357a_send_bulk_msg returned %i, raw_bytes_written=%i, i=%i\n",
- __func__, retval, raw_bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, raw_bytes_written=%i, i=%i\n",
+ retval, raw_bytes_written, i);
mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval < 0)
return retval;
@@ -597,7 +602,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
&a_priv->interrupt_flags) ||
test_bit(TIMO_NUM, &board->status));
if (retval) {
- dev_err(&usb_dev->dev, "%s: wait write complete interrupted\n", __func__);
+ dev_dbg(&usb_dev->dev, "wait write complete interrupted\n");
agilent_82357a_abort(a_priv, 0);
mutex_unlock(&a_priv->bulk_transfer_lock);
return -ERESTARTSYS;
@@ -614,8 +619,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
read_reg.address = BSR;
retval = agilent_82357a_read_registers(a_priv, &read_reg, 1, 1);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return -ETIMEDOUT;
}
@@ -632,8 +636,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
read_reg.address = ADSR;
retval = agilent_82357a_read_registers(a_priv, &read_reg, 1, 1);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return -ETIMEDOUT;
}
adsr = read_reg.value;
@@ -659,8 +662,7 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
100);
mutex_unlock(&a_priv->bulk_transfer_lock);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_receive_control_msg() returned %i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "receive_control_msg() returned %i\n", retval);
kfree(status_data);
return -EIO;
}
@@ -673,19 +675,19 @@ static ssize_t agilent_82357a_generic_write(gpib_board_t *board, uint8_t *buffer
return 0;
}
-static int agilent_82357a_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written)
+static int agilent_82357a_write(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int send_eoi, size_t *bytes_written)
{
return agilent_82357a_generic_write(board, buffer, length, 0, send_eoi, bytes_written);
}
-int agilent_82357a_command(gpib_board_t *board, uint8_t *buffer, size_t length,
+int agilent_82357a_command(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written)
{
return agilent_82357a_generic_write(board, buffer, length, 1, 0, bytes_written);
}
-int agilent_82357a_take_control_internal(gpib_board_t *board, int synchronous)
+int agilent_82357a_take_control_internal(struct gpib_board *board, int synchronous)
{
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
@@ -699,17 +701,20 @@ int agilent_82357a_take_control_internal(gpib_board_t *board, int synchronous)
write.value = AUX_TCA;
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval;
}
-static int agilent_82357a_take_control(gpib_board_t *board, int synchronous)
+static int agilent_82357a_take_control(struct gpib_board *board, int synchronous)
{
+ struct agilent_82357a_priv *a_priv = board->private_data;
const int timeout = 10;
int i;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+
/* It looks like the 9914 does not handle tcs properly.
* See comment above tms9914_take_control_workaround() in
* drivers/gpib/tms9914/tms9914_aux.c
@@ -730,31 +735,39 @@ static int agilent_82357a_take_control(gpib_board_t *board, int synchronous)
return 0;
}
-static int agilent_82357a_go_to_standby(gpib_board_t *board)
+static int agilent_82357a_go_to_standby(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet write;
int retval;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
write.address = AUXCR;
write.value = AUX_GTS;
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return 0;
}
//FIXME should change prototype to return int
-static void agilent_82357a_request_system_control(gpib_board_t *board, int request_control)
+static void agilent_82357a_request_system_control(struct gpib_board *board,
+ int request_control)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet writes[2];
int retval;
int i = 0;
+ if (!a_priv->bus_interface)
+ return; // -ENODEV;
+
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
/* 82357B needs bit to be set in 9914 AUXCR register */
writes[i].address = AUXCR;
if (request_control) {
@@ -771,18 +784,21 @@ static void agilent_82357a_request_system_control(gpib_board_t *board, int reque
++i;
retval = agilent_82357a_write_registers(a_priv, writes, i);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return;// retval;
}
-static void agilent_82357a_interface_clear(gpib_board_t *board, int assert)
+static void agilent_82357a_interface_clear(struct gpib_board *board, int assert)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet write;
int retval;
+ if (!a_priv->bus_interface)
+ return; // -ENODEV;
+
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
write.address = AUXCR;
write.value = AUX_SIC;
if (assert) {
@@ -791,56 +807,64 @@ static void agilent_82357a_interface_clear(gpib_board_t *board, int assert)
}
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
}
-static void agilent_82357a_remote_enable(gpib_board_t *board, int enable)
+static void agilent_82357a_remote_enable(struct gpib_board *board, int enable)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet write;
int retval;
+ if (!a_priv->bus_interface)
+ return; //-ENODEV;
+
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
write.address = AUXCR;
write.value = AUX_SRE;
if (enable)
write.value |= AUX_CS;
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
a_priv->ren_state = enable;
return;// 0;
}
-static int agilent_82357a_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int agilent_82357a_enable_eos(struct gpib_board *board, uint8_t eos_byte,
+ int compare_8_bits)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- if (compare_8_bits == 0) {
- pr_warn("%s: hardware only supports 8-bit EOS compare", __func__);
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ if (compare_8_bits == 0)
return -EOPNOTSUPP;
- }
+
a_priv->eos_char = eos_byte;
a_priv->eos_mode = REOS | BIN;
return 0;
}
-static void agilent_82357a_disable_eos(gpib_board_t *board)
+static void agilent_82357a_disable_eos(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
a_priv->eos_mode &= ~REOS;
}
-static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int agilent_82357a_update_status(struct gpib_board *board,
+ unsigned int clear_mask)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet address_status, bus_status;
int retval;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
board->status &= ~clear_mask;
if (a_priv->is_cic)
set_bit(CIC_NUM, &board->status);
@@ -850,8 +874,7 @@ static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned i
retval = agilent_82357a_read_registers(a_priv, &address_status, 1, 0);
if (retval) {
if (retval != -EAGAIN)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return board->status;
}
// check for remote/local
@@ -883,8 +906,7 @@ static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned i
retval = agilent_82357a_read_registers(a_priv, &bus_status, 1, 0);
if (retval) {
if (retval != -EAGAIN)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return board->status;
}
if (bus_status.value & BSR_SRQ_BIT)
@@ -895,40 +917,46 @@ static unsigned int agilent_82357a_update_status(gpib_board_t *board, unsigned i
return board->status;
}
-static int agilent_82357a_primary_address(gpib_board_t *board, unsigned int address)
+static int agilent_82357a_primary_address(struct gpib_board *board, unsigned int address)
{
struct agilent_82357a_priv *a_priv = board->private_data;
-	struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+	struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet write;
int retval;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
// put primary address in address0
write.address = ADR;
write.value = address & ADDRESS_MASK;
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval;
}
return retval;
}
-static int agilent_82357a_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int agilent_82357a_secondary_address(struct gpib_board *board,
+ unsigned int address, int enable)
{
if (enable)
- pr_warn("%s: warning: assigning a secondary address not supported\n", __func__);
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
+ return 0;
}
-static int agilent_82357a_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int agilent_82357a_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet writes[2];
struct agilent_82357a_register_pairlet read;
int retval;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
// execute parallel poll
writes[0].address = AUXCR;
writes[0].value = AUX_CS | AUX_RPP;
@@ -936,16 +964,14 @@ static int agilent_82357a_parallel_poll(gpib_board_t *board, uint8_t *result)
writes[1].value = a_priv->hw_control_bits & ~NOT_PARALLEL_POLL;
retval = agilent_82357a_write_registers(a_priv, writes, 2);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval;
}
udelay(2); //silly, since usb write will take way longer
read.address = CPTR;
retval = agilent_82357a_read_registers(a_priv, &read, 1, 1);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return retval;
}
*result = read.value;
@@ -956,75 +982,76 @@ static int agilent_82357a_parallel_poll(gpib_board_t *board, uint8_t *result)
writes[1].value = AUX_RPP;
retval = agilent_82357a_write_registers(a_priv, writes, 2);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval;
}
return 0;
}
-static void agilent_82357a_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void agilent_82357a_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
//board can only be system controller
return;// 0;
}
-static void agilent_82357a_parallel_poll_response(gpib_board_t *board, int ist)
+static void agilent_82357a_parallel_poll_response(struct gpib_board *board, int ist)
{
//board can only be system controller
return;// 0;
}
-static void agilent_82357a_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void agilent_82357a_serial_poll_response(struct gpib_board *board, uint8_t status)
{
//board can only be system controller
return;// 0;
}
-static uint8_t agilent_82357a_serial_poll_status(gpib_board_t *board)
+static uint8_t agilent_82357a_serial_poll_status(struct gpib_board *board)
{
//board can only be system controller
return 0;
}
-static void agilent_82357a_return_to_local(gpib_board_t *board)
+static void agilent_82357a_return_to_local(struct gpib_board *board)
{
//board can only be system controller
return;// 0;
}
-static int agilent_82357a_line_status(const gpib_board_t *board)
+static int agilent_82357a_line_status(const struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet bus_status;
int retval;
- int status = ValidALL;
+ int status = VALID_ALL;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
bus_status.address = BSR;
retval = agilent_82357a_read_registers(a_priv, &bus_status, 1, 0);
if (retval) {
if (retval != -EAGAIN)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return retval;
}
if (bus_status.value & BSR_REN_BIT)
- status |= BusREN;
+ status |= BUS_REN;
if (bus_status.value & BSR_IFC_BIT)
- status |= BusIFC;
+ status |= BUS_IFC;
if (bus_status.value & BSR_SRQ_BIT)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if (bus_status.value & BSR_EOI_BIT)
- status |= BusEOI;
+ status |= BUS_EOI;
if (bus_status.value & BSR_NRFD_BIT)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if (bus_status.value & BSR_NDAC_BIT)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if (bus_status.value & BSR_DAV_BIT)
- status |= BusDAV;
+ status |= BUS_DAV;
if (bus_status.value & BSR_ATN_BIT)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
@@ -1044,25 +1071,27 @@ static unsigned short nanosec_to_fast_talker_bits(unsigned int *nanosec)
return bits;
}
-static unsigned int agilent_82357a_t1_delay(gpib_board_t *board, unsigned int nanosec)
+static int agilent_82357a_t1_delay(struct gpib_board *board, unsigned int nanosec)
{
struct agilent_82357a_priv *a_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
+ struct usb_device *usb_dev;
struct agilent_82357a_register_pairlet write;
int retval;
+ if (!a_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(a_priv->bus_interface);
write.address = FAST_TALKER_T1;
write.value = nanosec_to_fast_talker_bits(&nanosec);
retval = agilent_82357a_write_registers(a_priv, &write, 1);
if (retval)
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return nanosec;
}
static void agilent_82357a_interrupt_complete(struct urb *urb)
{
- gpib_board_t *board = urb->context;
+ struct gpib_board *board = urb->context;
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
int retval;
@@ -1081,7 +1110,7 @@ static void agilent_82357a_interrupt_complete(struct urb *urb)
default: /* other error, resubmit */
retval = usb_submit_urb(a_priv->interrupt_urb, GFP_ATOMIC);
if (retval)
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__);
+ dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n");
return;
}
@@ -1097,10 +1126,10 @@ static void agilent_82357a_interrupt_complete(struct urb *urb)
retval = usb_submit_urb(a_priv->interrupt_urb, GFP_ATOMIC);
if (retval)
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__);
+ dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n");
}
-static int agilent_82357a_setup_urbs(gpib_board_t *board)
+static int agilent_82357a_setup_urbs(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev;
@@ -1133,8 +1162,7 @@ static int agilent_82357a_setup_urbs(gpib_board_t *board)
if (retval) {
usb_free_urb(a_priv->interrupt_urb);
a_priv->interrupt_urb = NULL;
- dev_err(&usb_dev->dev, "%s: failed to submit first interrupt urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit first interrupt urb, retval=%i\n", retval);
goto setup_exit;
}
mutex_unlock(&a_priv->interrupt_alloc_lock);
@@ -1165,7 +1193,7 @@ static void agilent_82357a_release_urbs(struct agilent_82357a_priv *a_priv)
}
}
-static int agilent_82357a_allocate_private(gpib_board_t *board)
+static int agilent_82357a_allocate_private(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv;
@@ -1180,112 +1208,82 @@ static int agilent_82357a_allocate_private(gpib_board_t *board)
return 0;
}
-static void agilent_82357a_free_private(gpib_board_t *board)
+static void agilent_82357a_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
-
}
-static int agilent_82357a_init(gpib_board_t *board)
+#define INIT_NUM_REG_WRITES 18
+static int agilent_82357a_init(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
struct agilent_82357a_register_pairlet hw_control;
- struct agilent_82357a_register_pairlet writes[0x20];
+ struct agilent_82357a_register_pairlet writes[INIT_NUM_REG_WRITES];
int retval;
- int i;
unsigned int nanosec;
- i = 0;
- writes[i].address = LED_CONTROL;
- writes[i].value = FAIL_LED_ON;
- ++i;
- writes[i].address = RESET_TO_POWERUP;
- writes[i].value = RESET_SPACEBALL;
- ++i;
- retval = agilent_82357a_write_registers(a_priv, writes, i);
+ writes[0].address = LED_CONTROL;
+ writes[0].value = FAIL_LED_ON;
+ writes[1].address = RESET_TO_POWERUP;
+ writes[1].value = RESET_SPACEBALL;
+ retval = agilent_82357a_write_registers(a_priv, writes, 2);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return -EIO;
}
set_current_state(TASK_INTERRUPTIBLE);
if (schedule_timeout(usec_to_jiffies(2000)))
return -ERESTARTSYS;
- i = 0;
- writes[i].address = AUXCR;
- writes[i].value = AUX_NBAF;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_HLDE;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_TON;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_LON;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_RSV2;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_INVAL;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_RPP;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_STDL;
- ++i;
- writes[i].address = AUXCR;
- writes[i].value = AUX_VSTDL;
- ++i;
- writes[i].address = FAST_TALKER_T1;
+ writes[0].address = AUXCR;
+ writes[0].value = AUX_NBAF;
+ writes[1].address = AUXCR;
+ writes[1].value = AUX_HLDE;
+ writes[2].address = AUXCR;
+ writes[2].value = AUX_TON;
+ writes[3].address = AUXCR;
+ writes[3].value = AUX_LON;
+ writes[4].address = AUXCR;
+ writes[4].value = AUX_RSV2;
+ writes[5].address = AUXCR;
+ writes[5].value = AUX_INVAL;
+ writes[6].address = AUXCR;
+ writes[6].value = AUX_RPP;
+ writes[7].address = AUXCR;
+ writes[7].value = AUX_STDL;
+ writes[8].address = AUXCR;
+ writes[8].value = AUX_VSTDL;
+ writes[9].address = FAST_TALKER_T1;
nanosec = board->t1_nano_sec;
- writes[i].value = nanosec_to_fast_talker_bits(&nanosec);
+ writes[9].value = nanosec_to_fast_talker_bits(&nanosec);
board->t1_nano_sec = nanosec;
- ++i;
- writes[i].address = ADR;
- writes[i].value = board->pad & ADDRESS_MASK;
- ++i;
- writes[i].address = PPR;
- writes[i].value = 0;
- ++i;
- writes[i].address = SPMR;
- writes[i].value = 0;
- ++i;
- writes[i].address = PROTOCOL_CONTROL;
- writes[i].value = WRITE_COMPLETE_INTERRUPT_EN;
- ++i;
- writes[i].address = IMR0;
- writes[i].value = HR_BOIE | HR_BIIE;
- ++i;
- writes[i].address = IMR1;
- writes[i].value = HR_SRQIE;
- ++i;
+ writes[10].address = ADR;
+ writes[10].value = board->pad & ADDRESS_MASK;
+ writes[11].address = PPR;
+ writes[11].value = 0;
+ writes[12].address = SPMR;
+ writes[12].value = 0;
+ writes[13].address = PROTOCOL_CONTROL;
+ writes[13].value = WRITE_COMPLETE_INTERRUPT_EN;
+ writes[14].address = IMR0;
+ writes[14].value = HR_BOIE | HR_BIIE;
+ writes[15].address = IMR1;
+ writes[15].value = HR_SRQIE;
// turn off reset state
- writes[i].address = AUXCR;
- writes[i].value = AUX_CHIP_RESET;
- ++i;
- writes[i].address = LED_CONTROL;
- writes[i].value = FIRMWARE_LED_CONTROL;
- ++i;
- if (i > ARRAY_SIZE(writes)) {
- dev_err(&usb_dev->dev, "%s: bug! writes[] overflow\n", __func__);
- return -EFAULT;
- }
- retval = agilent_82357a_write_registers(a_priv, writes, i);
+ writes[16].address = AUXCR;
+ writes[16].value = AUX_CHIP_RESET;
+ writes[17].address = LED_CONTROL;
+ writes[17].value = FIRMWARE_LED_CONTROL;
+ retval = agilent_82357a_write_registers(a_priv, writes, INIT_NUM_REG_WRITES);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return -EIO;
}
hw_control.address = HW_CONTROL;
retval = agilent_82357a_read_registers(a_priv, &hw_control, 1, 1);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_read_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "read_registers() returned error\n");
return -EIO;
}
a_priv->hw_control_bits = (hw_control.value & ~0x7) | NOT_TI_RESET | NOT_PARALLEL_POLL;
@@ -1307,7 +1305,7 @@ static inline int agilent_82357a_device_match(struct usb_interface *interface,
return 1;
}
-static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int agilent_82357a_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
int retval;
int i;
@@ -1336,7 +1334,7 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t
}
if (i == MAX_NUM_82357A_INTERFACES) {
dev_err(board->gpib_dev,
- "No Agilent 82357 gpib adapters found, have you loaded its firmware?\n");
+ "No supported adapters found, have you loaded its firmware?\n");
retval = -ENODEV;
goto attach_fail;
}
@@ -1372,8 +1370,7 @@ static int agilent_82357a_attach(gpib_board_t *board, const gpib_board_config_t
goto attach_fail;
}
- dev_info(&usb_dev->dev,
- "bus %d dev num %d attached to gpib minor %d, agilent usb interface %i\n",
+ dev_info(&usb_dev->dev, "bus %d dev num %d attached to gpib%d, interface %i\n",
usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
mutex_unlock(&agilent_82357a_hotplug_lock);
return retval;
@@ -1384,49 +1381,36 @@ attach_fail:
return retval;
}
-static int agilent_82357a_go_idle(gpib_board_t *board)
+static int agilent_82357a_go_idle(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(a_priv->bus_interface);
struct agilent_82357a_register_pairlet writes[0x20];
int retval;
- int i;
- i = 0;
// turn on tms9914 reset state
- writes[i].address = AUXCR;
- writes[i].value = AUX_CS | AUX_CHIP_RESET;
- ++i;
+ writes[0].address = AUXCR;
+ writes[0].value = AUX_CS | AUX_CHIP_RESET;
a_priv->hw_control_bits &= ~NOT_TI_RESET;
- writes[i].address = HW_CONTROL;
- writes[i].value = a_priv->hw_control_bits;
- ++i;
- writes[i].address = PROTOCOL_CONTROL;
- writes[i].value = 0;
- ++i;
- writes[i].address = IMR0;
- writes[i].value = 0;
- ++i;
- writes[i].address = IMR1;
- writes[i].value = 0;
- ++i;
- writes[i].address = LED_CONTROL;
- writes[i].value = 0;
- ++i;
- if (i > ARRAY_SIZE(writes)) {
- dev_err(&usb_dev->dev, "%s: bug! writes[] overflow\n", __func__);
- return -EFAULT;
- }
- retval = agilent_82357a_write_registers(a_priv, writes, i);
+ writes[1].address = HW_CONTROL;
+ writes[1].value = a_priv->hw_control_bits;
+ writes[2].address = PROTOCOL_CONTROL;
+ writes[2].value = 0;
+ writes[3].address = IMR0;
+ writes[3].value = 0;
+ writes[4].address = IMR1;
+ writes[4].value = 0;
+ writes[5].address = LED_CONTROL;
+ writes[5].value = 0;
+ retval = agilent_82357a_write_registers(a_priv, writes, 6);
if (retval) {
- dev_err(&usb_dev->dev, "%s: agilent_82357a_write_registers() returned error\n",
- __func__);
+ dev_err(&usb_dev->dev, "write_registers() returned error\n");
return -EIO;
}
return 0;
}
-static void agilent_82357a_detach(gpib_board_t *board)
+static void agilent_82357a_detach(struct gpib_board *board)
{
struct agilent_82357a_priv *a_priv;
@@ -1445,7 +1429,6 @@ static void agilent_82357a_detach(gpib_board_t *board)
agilent_82357a_release_urbs(a_priv);
agilent_82357a_free_private(board);
}
- dev_info(board->gpib_dev, "%s: detached\n", __func__);
mutex_unlock(&agilent_82357a_hotplug_lock);
}
@@ -1510,8 +1493,7 @@ static int agilent_82357a_driver_probe(struct usb_interface *interface,
if (i == MAX_NUM_82357A_INTERFACES) {
usb_put_dev(usb_dev);
mutex_unlock(&agilent_82357a_hotplug_lock);
- dev_err(&usb_dev->dev, "%s: out of space in agilent_82357a_driver_interfaces[]\n",
- __func__);
+ dev_err(&usb_dev->dev, "out of space in agilent_82357a_driver_interfaces[]\n");
return -1;
}
path = kmalloc(path_length, GFP_KERNEL);
@@ -1536,7 +1518,7 @@ static void agilent_82357a_driver_disconnect(struct usb_interface *interface)
for (i = 0; i < MAX_NUM_82357A_INTERFACES; ++i) {
if (agilent_82357a_driver_interfaces[i] == interface) {
- gpib_board_t *board = usb_get_intfdata(interface);
+ struct gpib_board *board = usb_get_intfdata(interface);
if (board) {
struct agilent_82357a_priv *a_priv = board->private_data;
@@ -1552,13 +1534,12 @@ static void agilent_82357a_driver_disconnect(struct usb_interface *interface)
mutex_unlock(&a_priv->control_alloc_lock);
}
}
- dev_dbg(&usb_dev->dev, "nulled agilent_82357a_driver_interfaces[%i]\n", i);
agilent_82357a_driver_interfaces[i] = NULL;
break;
}
}
if (i == MAX_NUM_82357A_INTERFACES)
- dev_err(&usb_dev->dev, "unable to find interface in agilent_82357a_driver_interfaces[]? bug?\n");
+ dev_err(&usb_dev->dev, "unable to find interface - bug?\n");
usb_put_dev(usb_dev);
mutex_unlock(&agilent_82357a_hotplug_lock);
@@ -1573,7 +1554,7 @@ static int agilent_82357a_driver_suspend(struct usb_interface *interface, pm_mes
for (i = 0; i < MAX_NUM_82357A_INTERFACES; ++i) {
if (agilent_82357a_driver_interfaces[i] == interface) {
- gpib_board_t *board = usb_get_intfdata(interface);
+ struct gpib_board *board = usb_get_intfdata(interface);
if (board) {
struct agilent_82357a_priv *a_priv = board->private_data;
@@ -1583,18 +1564,18 @@ static int agilent_82357a_driver_suspend(struct usb_interface *interface, pm_mes
agilent_82357a_abort(a_priv, 0);
retval = agilent_82357a_go_idle(board);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to go idle, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to go idle, retval=%i\n",
+ retval);
mutex_unlock(&agilent_82357a_hotplug_lock);
return retval;
}
mutex_lock(&a_priv->interrupt_alloc_lock);
agilent_82357a_cleanup_urbs(a_priv);
mutex_unlock(&a_priv->interrupt_alloc_lock);
- dev_info(&usb_dev->dev,
- "bus %d dev num %d gpib minor %d, agilent usb interface %i suspended\n",
- usb_dev->bus->busnum, usb_dev->devnum,
- board->minor, i);
+ dev_dbg(&usb_dev->dev,
+ "bus %d dev num %d gpib %d, interface %i suspended\n",
+ usb_dev->bus->busnum, usb_dev->devnum,
+ board->minor, i);
}
}
break;
@@ -1609,7 +1590,7 @@ static int agilent_82357a_driver_suspend(struct usb_interface *interface, pm_mes
static int agilent_82357a_driver_resume(struct usb_interface *interface)
{
struct usb_device *usb_dev = interface_to_usbdev(interface);
- gpib_board_t *board;
+ struct gpib_board *board;
int i, retval;
mutex_lock(&agilent_82357a_hotplug_lock);
@@ -1631,8 +1612,8 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface)
mutex_lock(&a_priv->interrupt_alloc_lock);
retval = usb_submit_urb(a_priv->interrupt_urb, GFP_KERNEL);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to resubmit interrupt urb in resume, retval=%i\n",
+ retval);
mutex_unlock(&a_priv->interrupt_alloc_lock);
mutex_unlock(&agilent_82357a_hotplug_lock);
return retval;
@@ -1655,9 +1636,9 @@ static int agilent_82357a_driver_resume(struct usb_interface *interface)
// assert/unassert REN
agilent_82357a_remote_enable(board, a_priv->ren_state);
- dev_info(&usb_dev->dev,
- "bus %d dev num %d gpib minor %d, agilent usb interface %i resumed\n",
- usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
+ dev_dbg(&usb_dev->dev,
+ "bus %d dev num %d gpib%d, interface %i resumed\n",
+ usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
}
resume_exit:
@@ -1667,7 +1648,7 @@ resume_exit:
}
static struct usb_driver agilent_82357a_bus_driver = {
- .name = "agilent_82357a_gpib",
+ .name = DRV_NAME,
.probe = agilent_82357a_driver_probe,
.disconnect = agilent_82357a_driver_disconnect,
.suspend = agilent_82357a_driver_suspend,
@@ -1680,19 +1661,18 @@ static int __init agilent_82357a_init_module(void)
int i;
int ret;
- pr_info("agilent_82357a_gpib driver loading");
for (i = 0; i < MAX_NUM_82357A_INTERFACES; ++i)
agilent_82357a_driver_interfaces[i] = NULL;
ret = usb_register(&agilent_82357a_bus_driver);
if (ret) {
- pr_err("agilent_82357a: usb_register failed: error = %d\n", ret);
+ pr_err("usb_register failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&agilent_82357a_gpib_interface, THIS_MODULE);
if (ret) {
- pr_err("agilent_82357a: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
usb_deregister(&agilent_82357a_bus_driver);
return ret;
}
@@ -1702,7 +1682,6 @@ static int __init agilent_82357a_init_module(void)
static void __exit agilent_82357a_exit_module(void)
{
- pr_info("agilent_82357a_gpib driver unloading");
gpib_unregister_driver(&agilent_82357a_gpib_interface);
usb_deregister(&agilent_82357a_bus_driver);
}
diff --git a/drivers/staging/gpib/cb7210/Makefile b/drivers/staging/gpib/cb7210/Makefile
index cda0725d6487..d239ae80b415 100644
--- a/drivers/staging/gpib/cb7210/Makefile
+++ b/drivers/staging/gpib/cb7210/Makefile
@@ -1,4 +1,3 @@
-ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
obj-$(CONFIG_GPIB_CB7210) += cb7210.o
diff --git a/drivers/staging/gpib/cb7210/cb7210.c b/drivers/staging/gpib/cb7210/cb7210.c
index 4d22f647a453..6b22a33a8c4f 100644
--- a/drivers/staging/gpib/cb7210/cb7210.c
+++ b/drivers/staging/gpib/cb7210/cb7210.c
@@ -5,6 +5,10 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "cb7210.h"
#include <linux/ioport.h>
#include <linux/sched.h>
@@ -23,7 +27,10 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver Measurement Computing boards using cb7210.2 and cbi488.2");
-static inline int have_fifo_word(const struct cb7210_priv *cb_priv)
+static int cb7210_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int *end, size_t *bytes_read);
+
+static inline int have_fifo_word(const struct cb7210_priv *cb_priv)
{
if (((cb7210_read_byte(cb_priv, HS_STATUS)) &
(HS_RX_MSB_NOT_EMPTY | HS_RX_LSB_NOT_EMPTY)) ==
@@ -33,7 +40,7 @@ static inline int have_fifo_word(const struct cb7210_priv *cb_priv)
return 0;
}
-static inline void input_fifo_enable(gpib_board_t *board, int enable)
+static inline void input_fifo_enable(struct gpib_board *board, int enable)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -69,7 +76,7 @@ static inline void input_fifo_enable(gpib_board_t *board, int enable)
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fifo_read(gpib_board_t *board, struct cb7210_priv *cb_priv, uint8_t *buffer,
+static int fifo_read(struct gpib_board *board, struct cb7210_priv *cb_priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -80,12 +87,12 @@ static int fifo_read(gpib_board_t *board, struct cb7210_priv *cb_priv, uint8_t *
*bytes_read = 0;
if (cb_priv->fifo_iobase == 0) {
- pr_err("cb7210: fifo iobase is zero!\n");
+ dev_err(board->gpib_dev, "fifo iobase is zero!\n");
return -EIO;
}
*end = 0;
if (length <= cb7210_fifo_size) {
- pr_err("cb7210: bug! %s with length < fifo size\n", __func__);
+ dev_err(board->gpib_dev, " bug! fifo read length < fifo size\n");
return -EINVAL;
}
@@ -100,7 +107,6 @@ static int fifo_read(gpib_board_t *board, struct cb7210_priv *cb_priv, uint8_t *
test_bit(RECEIVED_END_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_warn("cb7210: fifo half full wait interrupted\n");
retval = -ERESTARTSYS;
nec7210_set_reg_bits(nec_priv, IMR2, HR_DMAI, 0);
break;
@@ -150,7 +156,6 @@ static int fifo_read(gpib_board_t *board, struct cb7210_priv *cb_priv, uint8_t *
test_bit(RECEIVED_END_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_warn("cb7210: fifo half full wait interrupted\n");
retval = -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
@@ -165,8 +170,8 @@ static int fifo_read(gpib_board_t *board, struct cb7210_priv *cb_priv, uint8_t *
return retval;
}
-int cb7210_accel_read(gpib_board_t *board, uint8_t *buffer,
- size_t length, int *end, size_t *bytes_read)
+static int cb7210_accel_read(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int *end, size_t *bytes_read)
{
ssize_t retval;
struct cb7210_priv *cb_priv = board->private_data;
@@ -185,7 +190,6 @@ int cb7210_accel_read(gpib_board_t *board, uint8_t *buffer,
test_bit(READ_READY_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_warn("cb7210: read ready wait interrupted\n");
return -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
@@ -225,7 +229,7 @@ static int output_fifo_empty(const struct cb7210_priv *cb_priv)
return 0;
}
-static inline void output_fifo_enable(gpib_board_t *board, int enable)
+static inline void output_fifo_enable(struct gpib_board *board, int enable)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -260,7 +264,8 @@ static inline void output_fifo_enable(gpib_board_t *board, int enable)
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int fifo_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+ size_t *bytes_written)
{
size_t count = 0;
ssize_t retval = 0;
@@ -271,7 +276,7 @@ static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_
*bytes_written = 0;
if (cb_priv->fifo_iobase == 0) {
- pr_err("cb7210: fifo iobase is zero!\n");
+ dev_err(board->gpib_dev, "fifo iobase is zero!\n");
return -EINVAL;
}
if (length == 0)
@@ -290,7 +295,6 @@ static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_warn("cb7210: fifo wait interrupted\n");
retval = -ERESTARTSYS;
break;
}
@@ -306,7 +310,7 @@ static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_
if (num_bytes + count > length)
num_bytes = length - count;
if (num_bytes % cb7210_fifo_width) {
- pr_err("cb7210: bug! %s with odd number of bytes\n", __func__);
+ dev_err(board->gpib_dev, " bug! fifo write with odd number of bytes\n");
retval = -EINVAL;
break;
}
@@ -331,7 +335,6 @@ static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_err("cb7210: wait for last byte interrupted\n");
retval = -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
@@ -347,8 +350,8 @@ static int fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length, size_
return retval;
}
-int cb7210_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int cb7210_accel_write(struct gpib_board *board, uint8_t *buffer,
+ size_t length, int send_eoi, size_t *bytes_written)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -375,39 +378,37 @@ int cb7210_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int
return retval;
}
-int cb7210_line_status(const gpib_board_t *board)
+static int cb7210_line_status(const struct gpib_board *board)
{
- int status = ValidALL;
+ int status = VALID_ALL;
int bsr_bits;
struct cb7210_priv *cb_priv;
- struct nec7210_priv *nec_priv;
cb_priv = board->private_data;
- nec_priv = &cb_priv->nec7210_priv;
bsr_bits = cb7210_paged_read_byte(cb_priv, BUS_STATUS, BUS_STATUS_PAGE);
if ((bsr_bits & BSR_REN_BIT) == 0)
- status |= BusREN;
+ status |= BUS_REN;
if ((bsr_bits & BSR_IFC_BIT) == 0)
- status |= BusIFC;
+ status |= BUS_IFC;
if ((bsr_bits & BSR_SRQ_BIT) == 0)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if ((bsr_bits & BSR_EOI_BIT) == 0)
- status |= BusEOI;
+ status |= BUS_EOI;
if ((bsr_bits & BSR_NRFD_BIT) == 0)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if ((bsr_bits & BSR_NDAC_BIT) == 0)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if ((bsr_bits & BSR_DAV_BIT) == 0)
- status |= BusDAV;
+ status |= BUS_DAV;
if ((bsr_bits & BSR_ATN_BIT) == 0)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
-unsigned int cb7210_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int cb7210_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -424,16 +425,16 @@ unsigned int cb7210_t1_delay(gpib_board_t *board, unsigned int nano_sec)
return retval;
}
-irqreturn_t cb7210_locked_internal_interrupt(gpib_board_t *board);
+static irqreturn_t cb7210_locked_internal_interrupt(struct gpib_board *board);
/*
* GPIB interrupt service routines
*/
-irqreturn_t cb_pci_interrupt(int irq, void *arg)
+static irqreturn_t cb_pci_interrupt(int irq, void *arg)
{
int bits;
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct cb7210_priv *priv = board->private_data;
// first task check if this is really our interrupt in a shared irq environment
@@ -462,7 +463,7 @@ irqreturn_t cb_pci_interrupt(int irq, void *arg)
return cb7210_locked_internal_interrupt(arg);
}
-irqreturn_t cb7210_internal_interrupt(gpib_board_t *board)
+static irqreturn_t cb7210_internal_interrupt(struct gpib_board *board)
{
int hs_status, status1, status2;
struct cb7210_priv *priv = board->private_data;
@@ -479,7 +480,7 @@ irqreturn_t cb7210_internal_interrupt(gpib_board_t *board)
status2 = read_byte(nec_priv, ISR2);
nec7210_interrupt_have_status(board, nec_priv, status1, status2);
- dev_dbg(board->gpib_dev, "cb7210: status 0x%x, mode 0x%x\n", hs_status, priv->hs_mode_bits);
+ dev_dbg(board->gpib_dev, "status 0x%x, mode 0x%x\n", hs_status, priv->hs_mode_bits);
clear_bits = 0;
@@ -516,7 +517,7 @@ irqreturn_t cb7210_internal_interrupt(gpib_board_t *board)
return IRQ_HANDLED;
}
-irqreturn_t cb7210_locked_internal_interrupt(gpib_board_t *board)
+static irqreturn_t cb7210_locked_internal_interrupt(struct gpib_board *board)
{
unsigned long flags;
irqreturn_t retval;
@@ -527,55 +528,57 @@ irqreturn_t cb7210_locked_internal_interrupt(gpib_board_t *board)
return retval;
}
-irqreturn_t cb7210_interrupt(int irq, void *arg)
+static irqreturn_t cb7210_interrupt(int irq, void *arg)
{
return cb7210_internal_interrupt(arg);
}
-static int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config);
+static int cb_pci_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int cb_isa_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static void cb_pci_detach(gpib_board_t *board);
-static void cb_isa_detach(gpib_board_t *board);
+static void cb_pci_detach(struct gpib_board *board);
+static void cb_isa_detach(struct gpib_board *board);
// wrappers for interface functions
-int cb7210_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+static int cb7210_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int *end, size_t *bytes_read)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-int cb7210_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written)
+static int cb7210_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int send_eoi, size_t *bytes_written)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-int cb7210_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int cb7210_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+ size_t *bytes_written)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-int cb7210_take_control(gpib_board_t *board, int synchronous)
+static int cb7210_take_control(struct gpib_board *board, int synchronous)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-int cb7210_go_to_standby(gpib_board_t *board)
+static int cb7210_go_to_standby(struct gpib_board *board)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-void cb7210_request_system_control(gpib_board_t *board, int request_control)
+static void cb7210_request_system_control(struct gpib_board *board, int request_control)
{
struct cb7210_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -589,91 +592,91 @@ void cb7210_request_system_control(gpib_board_t *board, int request_control)
nec7210_request_system_control(board, nec_priv, request_control);
}
-void cb7210_interface_clear(gpib_board_t *board, int assert)
+static void cb7210_interface_clear(struct gpib_board *board, int assert)
{
struct cb7210_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-void cb7210_remote_enable(gpib_board_t *board, int enable)
+static void cb7210_remote_enable(struct gpib_board *board, int enable)
{
struct cb7210_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-int cb7210_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int cb7210_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-void cb7210_disable_eos(gpib_board_t *board)
+static void cb7210_disable_eos(struct gpib_board *board)
{
struct cb7210_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-unsigned int cb7210_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int cb7210_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-int cb7210_primary_address(gpib_board_t *board, unsigned int address)
+static int cb7210_primary_address(struct gpib_board *board, unsigned int address)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-int cb7210_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int cb7210_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-int cb7210_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int cb7210_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-void cb7210_parallel_poll_configure(gpib_board_t *board, uint8_t configuration)
+static void cb7210_parallel_poll_configure(struct gpib_board *board, uint8_t configuration)
{
struct cb7210_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, configuration);
}
-void cb7210_parallel_poll_response(gpib_board_t *board, int ist)
+static void cb7210_parallel_poll_response(struct gpib_board *board, int ist)
{
struct cb7210_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-void cb7210_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void cb7210_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct cb7210_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-uint8_t cb7210_serial_poll_status(gpib_board_t *board)
+static uint8_t cb7210_serial_poll_status(struct gpib_board *board)
{
struct cb7210_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-void cb7210_return_to_local(gpib_board_t *board)
+static void cb7210_return_to_local(struct gpib_board *board)
{
struct cb7210_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -849,27 +852,27 @@ static gpib_interface_t cb_isa_accel_interface = {
.return_to_local = cb7210_return_to_local,
};
-static int cb7210_allocate_private(gpib_board_t *board)
+static int cb7210_allocate_private(struct gpib_board *board)
{
struct cb7210_priv *priv;
board->private_data = kmalloc(sizeof(struct cb7210_priv), GFP_KERNEL);
if (!board->private_data)
- return -1;
+ return -ENOMEM;
priv = board->private_data;
memset(priv, 0, sizeof(struct cb7210_priv));
init_nec7210_private(&priv->nec7210_priv);
return 0;
}
-void cb7210_generic_detach(gpib_board_t *board)
+static void cb7210_generic_detach(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
// generic part of attach functions shared by all cb7210 boards
-int cb7210_generic_attach(gpib_board_t *board)
+static int cb7210_generic_attach(struct gpib_board *board)
{
struct cb7210_priv *cb_priv;
struct nec7210_priv *nec_priv;
@@ -887,7 +890,7 @@ int cb7210_generic_attach(gpib_board_t *board)
return 0;
}
-int cb7210_init(struct cb7210_priv *cb_priv, gpib_board_t *board)
+static int cb7210_init(struct cb7210_priv *cb_priv, struct gpib_board *board)
{
struct nec7210_priv *nec_priv = &cb_priv->nec7210_priv;
@@ -917,13 +920,13 @@ int cb7210_init(struct cb7210_priv *cb_priv, gpib_board_t *board)
/* poll so we can detect assertion of ATN */
if (gpib_request_pseudo_irq(board, cb_pci_interrupt)) {
- pr_err("pc2_gpib: failed to allocate pseudo_irq\n");
+ pr_err("failed to allocate pseudo_irq\n");
return -1;
}
return 0;
}
-int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int cb_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct cb7210_priv *cb_priv;
struct nec7210_priv *nec_priv;
@@ -957,17 +960,17 @@ int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
}
}
if (!cb_priv->pci_device) {
- pr_warn("cb7210: no supported boards found.\n");
- return -1;
+ dev_err(board->gpib_dev, "no supported boards found.\n");
+ return -ENODEV;
}
if (pci_enable_device(cb_priv->pci_device)) {
- pr_err("cb7210: error enabling pci device\n");
- return -1;
+ dev_err(board->gpib_dev, "error enabling pci device\n");
+ return -EIO;
}
- if (pci_request_regions(cb_priv->pci_device, "cb7210"))
- return -1;
+ if (pci_request_regions(cb_priv->pci_device, DRV_NAME))
+ return -EBUSY;
switch (cb_priv->pci_chip) {
case PCI_CHIP_AMCC_S5933:
cb_priv->amcc_iobase = pci_resource_start(cb_priv->pci_device, 0);
@@ -979,13 +982,14 @@ int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
cb_priv->fifo_iobase = nec_priv->iobase;
break;
default:
- pr_err("cb7210: bug! unhandled pci_chip=%i\n", cb_priv->pci_chip);
+ dev_err(board->gpib_dev, "bug! unhandled pci_chip=%i\n", cb_priv->pci_chip);
return -EIO;
}
isr_flags |= IRQF_SHARED;
- if (request_irq(cb_priv->pci_device->irq, cb_pci_interrupt, isr_flags, "cb7210", board)) {
- pr_err("cb7210: can't request IRQ %d\n", cb_priv->pci_device->irq);
- return -1;
+ if (request_irq(cb_priv->pci_device->irq, cb_pci_interrupt, isr_flags, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "can't request IRQ %d\n",
+ cb_priv->pci_device->irq);
+ return -EBUSY;
}
cb_priv->irq = cb_priv->pci_device->irq;
@@ -1004,7 +1008,7 @@ int cb_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return cb7210_init(cb_priv, board);
}
-void cb_pci_detach(gpib_board_t *board)
+static void cb_pci_detach(struct gpib_board *board)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1027,7 +1031,7 @@ void cb_pci_detach(gpib_board_t *board)
cb7210_generic_detach(board);
}
-int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int cb_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
int isr_flags = 0;
struct cb7210_priv *cb_priv;
@@ -1040,20 +1044,22 @@ int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
cb_priv = board->private_data;
nec_priv = &cb_priv->nec7210_priv;
- if (!request_region(config->ibbase, cb7210_iosize, "cb7210")) {
- pr_err("gpib: ioports starting at 0x%x are already in use\n", config->ibbase);
- return -EIO;
+ if (!request_region(config->ibbase, cb7210_iosize, DRV_NAME)) {
+ dev_err(board->gpib_dev, "ioports starting at 0x%x are already in use\n",
+ config->ibbase);
+ return -EBUSY;
}
nec_priv->iobase = config->ibbase;
cb_priv->fifo_iobase = nec7210_iobase(cb_priv);
bits = irq_bits(config->ibirq);
if (bits == 0)
- pr_err("board incapable of using irq %i, try 2-5, 7, 10, or 11\n", config->ibirq);
+ dev_err(board->gpib_dev, "board incapable of using irq %i, try 2-5, 7, 10, or 11\n",
+ config->ibirq);
// install interrupt handler
- if (request_irq(config->ibirq, cb7210_interrupt, isr_flags, "cb7210", board)) {
- pr_err("gpib: can't request IRQ %d\n", config->ibirq);
+ if (request_irq(config->ibirq, cb7210_interrupt, isr_flags, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to obtain IRQ %d\n", config->ibirq);
return -EBUSY;
}
cb_priv->irq = config->ibirq;
@@ -1061,7 +1067,7 @@ int cb_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
return cb7210_init(cb_priv, board);
}
-void cb_isa_detach(gpib_board_t *board)
+static void cb_isa_detach(struct gpib_board *board)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1093,7 +1099,7 @@ static const struct pci_device_id cb7210_pci_table[] = {
MODULE_DEVICE_TABLE(pci, cb7210_pci_table);
static struct pci_driver cb7210_pci_driver = {
- .name = "cb7210",
+ .name = DRV_NAME,
.id_table = cb7210_pci_table,
.probe = &cb7210_pci_probe
};
@@ -1106,7 +1112,7 @@ static struct pci_driver cb7210_pci_driver = {
* pcmcia skeleton example (presumably David Hinds)
***************************************************************************/
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -1117,23 +1123,6 @@ static struct pci_driver cb7210_pci_driver = {
#include <pcmcia/ds.h>
/*
- * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
- * you do not define PCMCIA_DEBUG at all, all the debug code will be
- * left out. If you compile with PCMCIA_DEBUG=0, the debug code will
- * be present but disabled -- but it can then be enabled for specific
- * modules at load time with a 'pc_debug=#' option to insmod.
- */
-
-#define PCMCIA_DEBUG 1
-
-#ifdef PCMCIA_DEBUG
-static int pc_debug = PCMCIA_DEBUG;
-#define DEBUG(n, args...) do {if (pc_debug > (n)) pr_debug(args); } while (0)
-#else
-#define DEBUG(args...)
-#endif
-
-/*
* The event() function is this driver's Card Services event handler.
* It will be called by Card Services when an appropriate card status
* event is received. The config() and release() entry points are
@@ -1144,8 +1133,8 @@ static int pc_debug = PCMCIA_DEBUG;
static int cb_gpib_config(struct pcmcia_device *link);
static void cb_gpib_release(struct pcmcia_device *link);
-static int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static void cb_pcmcia_detach(gpib_board_t *board);
+static int cb_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static void cb_pcmcia_detach(struct gpib_board *board);
/*
* A linked list of "instances" of the gpib device. Each actual
@@ -1178,7 +1167,7 @@ static struct pcmcia_device *curr_dev;
struct local_info {
struct pcmcia_device *p_dev;
- gpib_board_t *dev;
+ struct gpib_board *dev;
};
/*
@@ -1197,8 +1186,6 @@ static int cb_gpib_probe(struct pcmcia_device *link)
// int ret, i;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
-
/* Allocate space for private device-specific data */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1236,9 +1223,7 @@ static int cb_gpib_probe(struct pcmcia_device *link)
static void cb_gpib_remove(struct pcmcia_device *link)
{
struct local_info *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
-
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (info->dev)
cb_pcmcia_detach(info->dev);
@@ -1267,7 +1252,6 @@ static int cb_gpib_config(struct pcmcia_device *link)
handle = link;
dev = link->priv;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
retval = pcmcia_loop_config(link, &cb_gpib_config_iteration, NULL);
if (retval) {
@@ -1276,8 +1260,6 @@ static int cb_gpib_config(struct pcmcia_device *link)
return -ENODEV;
}
- DEBUG(0, "gpib_cs: manufacturer: 0x%x card: 0x%x\n", link->manf_id, link->card_id);
-
/*
* This actually configures the PCMCIA socket -- setting up
* the I/O windows and the interrupt mapping.
@@ -1289,7 +1271,6 @@ static int cb_gpib_config(struct pcmcia_device *link)
return -ENODEV;
}
- pr_info("gpib device loaded\n");
return 0;
} /* gpib_config */
@@ -1301,18 +1282,16 @@ static int cb_gpib_config(struct pcmcia_device *link)
static void cb_gpib_release(struct pcmcia_device *link)
{
- DEBUG(0, "%s(0x%p)\n", __func__, link);
pcmcia_disable_device(link);
}
static int cb_gpib_suspend(struct pcmcia_device *link)
{
//struct local_info *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (link->open)
- pr_warn("Device still open ???\n");
+ dev_warn(&link->dev, "Device still open\n");
//netif_device_detach(dev);
return 0;
@@ -1321,12 +1300,10 @@ static int cb_gpib_suspend(struct pcmcia_device *link)
static int cb_gpib_resume(struct pcmcia_device *link)
{
//struct local_info *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
/*if (link->open) {
* ni_gpib_probe(dev); / really?
- * printk("Gpib resumed ???\n");
* //netif_device_attach(dev);
*
*/
@@ -1342,8 +1319,8 @@ static struct pcmcia_device_id cb_pcmcia_ids[] = {
MODULE_DEVICE_TABLE(pcmcia, cb_pcmcia_ids);
static struct pcmcia_driver cb_gpib_cs_driver = {
+ .name = "cb_gpib_cs",
.owner = THIS_MODULE,
- .drv = { .name = "cb_gpib_cs", },
.id_table = cb_pcmcia_ids,
.probe = cb_gpib_probe,
.remove = cb_gpib_remove,
@@ -1351,9 +1328,8 @@ static struct pcmcia_driver cb_gpib_cs_driver = {
.resume = cb_gpib_resume,
};
-void cb_pcmcia_cleanup_module(void)
+static void cb_pcmcia_cleanup_module(void)
{
- DEBUG(0, "cb_gpib_cs: unloading\n");
pcmcia_unregister_driver(&cb_gpib_cs_driver);
}
@@ -1441,15 +1417,15 @@ static gpib_interface_t cb_pcmcia_accel_interface = {
.return_to_local = cb7210_return_to_local,
};
-int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int cb_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct cb7210_priv *cb_priv;
struct nec7210_priv *nec_priv;
int retval;
if (!curr_dev) {
- pr_err("no cb pcmcia cards found\n");
- return -1;
+ dev_err(board->gpib_dev, "no cb pcmcia cards found\n");
+ return -ENODEV;
}
retval = cb7210_generic_attach(board);
@@ -1460,25 +1436,24 @@ int cb_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv = &cb_priv->nec7210_priv;
if (!request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
- "cb7210")) {
- pr_err("gpib: ioports starting at 0x%lx are already in use\n",
- (unsigned long)curr_dev->resource[0]->start);
- return -EIO;
+ DRV_NAME)) {
+ dev_err(board->gpib_dev, "ioports starting at 0x%lx are already in use\n",
+ (unsigned long)curr_dev->resource[0]->start);
+ return -EBUSY;
}
nec_priv->iobase = curr_dev->resource[0]->start;
cb_priv->fifo_iobase = curr_dev->resource[0]->start;
- if (request_irq(curr_dev->irq, cb7210_interrupt, IRQF_SHARED,
- "cb7210", board)) {
- pr_err("cb7210: failed to request IRQ %d\n", curr_dev->irq);
- return -1;
+ if (request_irq(curr_dev->irq, cb7210_interrupt, IRQF_SHARED, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to request IRQ %d\n", curr_dev->irq);
+ return -EBUSY;
}
cb_priv->irq = curr_dev->irq;
return cb7210_init(cb_priv, board);
}
-void cb_pcmcia_detach(gpib_board_t *board)
+static void cb_pcmcia_detach(struct gpib_board *board)
{
struct cb7210_priv *cb_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1496,7 +1471,7 @@ void cb_pcmcia_detach(gpib_board_t *board)
cb7210_generic_detach(board);
}
-#endif /* GPIB_PCMCIA */
+#endif /* CONFIG_GPIB_PCMCIA */
static int __init cb7210_init_module(void)
{
@@ -1504,75 +1479,75 @@ static int __init cb7210_init_module(void)
ret = pci_register_driver(&cb7210_pci_driver);
if (ret) {
- pr_err("cb7210: pci_register_driver failed: error = %d\n", ret);
+ pr_err("pci_register_driver failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&cb_pci_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci;
}
ret = gpib_register_driver(&cb_isa_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_isa;
}
ret = gpib_register_driver(&cb_pci_accel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci_accel;
}
ret = gpib_register_driver(&cb_pci_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci_unaccel;
}
ret = gpib_register_driver(&cb_isa_accel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_isa_accel;
}
ret = gpib_register_driver(&cb_isa_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_isa_unaccel;
}
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
ret = gpib_register_driver(&cb_pcmcia_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia;
}
ret = gpib_register_driver(&cb_pcmcia_accel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia_accel;
}
ret = gpib_register_driver(&cb_pcmcia_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("cb7210: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia_unaccel;
}
ret = pcmcia_register_driver(&cb_gpib_cs_driver);
if (ret) {
- pr_err("cb7210: pcmcia_register_driver failed: error = %d\n", ret);
+ pr_err("pcmcia_register_driver failed: error = %d\n", ret);
goto err_pcmcia_driver;
}
#endif
return 0;
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
err_pcmcia_driver:
gpib_unregister_driver(&cb_pcmcia_unaccel_interface);
err_pcmcia_unaccel:
@@ -1606,7 +1581,7 @@ static void __exit cb7210_exit_module(void)
gpib_unregister_driver(&cb_pci_unaccel_interface);
gpib_unregister_driver(&cb_isa_accel_interface);
gpib_unregister_driver(&cb_isa_unaccel_interface);
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
gpib_unregister_driver(&cb_pcmcia_interface);
gpib_unregister_driver(&cb_pcmcia_accel_interface);
gpib_unregister_driver(&cb_pcmcia_unaccel_interface);
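For reference, cb7210_allocate_private() above still open-codes kmalloc() followed by memset(). A kzalloc()-based variant (a sketch only, not part of this patch) would keep the -ENOMEM convention introduced here while dropping the explicit memset():

#include <linux/slab.h>		/* kzalloc(); struct cb7210_priv comes from the driver's cb7210.h */

static int cb7210_allocate_private(struct gpib_board *board)
{
	struct cb7210_priv *priv;

	/* kzalloc() returns zeroed memory, replacing kmalloc() + memset() */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	init_nec7210_private(&priv->nec7210_priv);
	board->private_data = priv;
	return 0;
}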
diff --git a/drivers/staging/gpib/cb7210/cb7210.h b/drivers/staging/gpib/cb7210/cb7210.h
index d56cd905cc8c..2108fe7a8ce5 100644
--- a/drivers/staging/gpib/cb7210/cb7210.h
+++ b/drivers/staging/gpib/cb7210/cb7210.h
@@ -36,51 +36,6 @@ struct cb7210_priv {
unsigned in_fifo_half_full : 1;
};
-// interrupt service routines
-irqreturn_t cb_pci_interrupt(int irq, void *arg);
-irqreturn_t cb7210_interrupt(int irq, void *arg);
-irqreturn_t cb7210_internal_interrupt(gpib_board_t *board);
-
-// interface functions
-int cb7210_read(gpib_board_t *board, uint8_t *buffer, size_t length,
- int *end, size_t *bytes_read);
-int cb7210_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
- int *end, size_t *bytes_read);
-int cb7210_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-int cb7210_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-int cb7210_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int cb7210_take_control(gpib_board_t *board, int synchronous);
-int cb7210_go_to_standby(gpib_board_t *board);
-void cb7210_request_system_control(gpib_board_t *board, int request_control);
-void cb7210_interface_clear(gpib_board_t *board, int assert);
-void cb7210_remote_enable(gpib_board_t *board, int enable);
-int cb7210_enable_eos(gpib_board_t *board, uint8_t eos_byte,
- int compare_8_bits);
-void cb7210_disable_eos(gpib_board_t *board);
-unsigned int cb7210_update_status(gpib_board_t *board, unsigned int clear_mask);
-int cb7210_primary_address(gpib_board_t *board, unsigned int address);
-int cb7210_secondary_address(gpib_board_t *board, unsigned int address,
- int enable);
-int cb7210_parallel_poll(gpib_board_t *board, uint8_t *result);
-void cb7210_serial_poll_response(gpib_board_t *board, uint8_t status);
-uint8_t cb7210_serial_poll_status(gpib_board_t *board);
-void cb7210_parallel_poll_configure(gpib_board_t *board, uint8_t configuration);
-void cb7210_parallel_poll_response(gpib_board_t *board, int ist);
-int cb7210_line_status(const gpib_board_t *board);
-unsigned int cb7210_t1_delay(gpib_board_t *board, unsigned int nano_sec);
-void cb7210_return_to_local(gpib_board_t *board);
-
-// utility functions
-void cb7210_generic_detach(gpib_board_t *board);
-int cb7210_generic_attach(gpib_board_t *board);
-int cb7210_init(struct cb7210_priv *priv, gpib_board_t *board);
-
-// pcmcia init/cleanup
-int cb_pcmcia_init_module(void);
-void cb_pcmcia_cleanup_module(void);
-
// pci-gpib register offset
static const int cb7210_reg_offset = 1;
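The prototype removal above works because the cb7210 handlers are now static and are reached only through the interface tables registered from cb7210_gpib.c itself; nothing outside that file needs the declarations. A trimmed sketch of that structure (the table name and stub body are illustrative; field names follow the tables already used by the driver):

#include "cb7210.h"	/* assumes the driver's own types and the nec7210 helpers */

static int cb7210_example_read(struct gpib_board *board, uint8_t *buffer,
			       size_t length, int *end, size_t *bytes_read)
{
	struct cb7210_priv *priv = board->private_data;

	/* static handler, visible only inside this translation unit */
	return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}

static gpib_interface_t cb7210_example_interface = {
	.read = cb7210_example_read,
	.return_to_local = cb7210_return_to_local,
	/* the remaining handlers are wired up the same way before
	 * gpib_register_driver() is called from the module init code
	 */
};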
diff --git a/drivers/staging/gpib/cec/cec.h b/drivers/staging/gpib/cec/cec.h
index 040ca70ed708..3ce2869c7429 100644
--- a/drivers/staging/gpib/cec/cec.h
+++ b/drivers/staging/gpib/cec/cec.h
@@ -16,34 +16,5 @@ struct cec_priv {
unsigned int irq;
};
-// interface functions
-int cec_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
-int cec_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written);
-int cec_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int cec_take_control(gpib_board_t *board, int synchronous);
-int cec_go_to_standby(gpib_board_t *board);
-void cec_request_system_control(gpib_board_t *board, int request_control);
-void cec_interface_clear(gpib_board_t *board, int assert);
-void cec_remote_enable(gpib_board_t *board, int enable);
-int cec_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits);
-void cec_disable_eos(gpib_board_t *board);
-unsigned int cec_update_status(gpib_board_t *board, unsigned int clear_mask);
-int cec_primary_address(gpib_board_t *board, unsigned int address);
-int cec_secondary_address(gpib_board_t *board, unsigned int address, int enable);
-int cec_parallel_poll(gpib_board_t *board, uint8_t *result);
-void cec_parallel_poll_configure(gpib_board_t *board, uint8_t configuration);
-void cec_parallel_poll_response(gpib_board_t *board, int ist);
-void cec_serial_poll_response(gpib_board_t *board, uint8_t status);
-void cec_return_to_local(gpib_board_t *board);
-
-// interrupt service routines
-irqreturn_t cec_interrupt(int irq, void *arg);
-
-// utility functions
-void cec_free_private(gpib_board_t *board);
-int cec_generic_attach(gpib_board_t *board);
-void cec_init(struct cec_priv *priv, const gpib_board_t *board);
-
// offset between consecutive nec7210 registers
static const int cec_reg_offset = 1;
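The cec changes that follow use the same errno convention as the cb7210 changes above: bare `return -1` is replaced with a code describing the failure. A condensed sketch of the mapping used by the PCI attach paths (the helper name is made up; cleanup on the error paths is omitted, as in the existing attach functions):

/* assumes <linux/pci.h> and the driver headers providing DRV_NAME and struct gpib_board */
static int example_pci_bringup(struct gpib_board *board, struct pci_dev *pdev)
{
	if (!pdev) {
		dev_err(board->gpib_dev, "no supported boards found\n");
		return -ENODEV;		/* no matching hardware */
	}
	if (pci_enable_device(pdev)) {
		dev_err(board->gpib_dev, "error enabling pci device\n");
		return -EIO;		/* device failed to come up */
	}
	if (pci_request_regions(pdev, DRV_NAME))
		return -EBUSY;		/* resources already claimed */
	return 0;
}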
diff --git a/drivers/staging/gpib/cec/cec_gpib.c b/drivers/staging/gpib/cec/cec_gpib.c
index d056cd1d6b3e..a822fa428cd0 100644
--- a/drivers/staging/gpib/cec/cec_gpib.c
+++ b/drivers/staging/gpib/cec/cec_gpib.c
@@ -4,6 +4,10 @@
* copyright : (C) 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "cec.h"
#include <linux/pci.h>
#include <linux/io.h>
@@ -19,9 +23,9 @@ MODULE_DESCRIPTION("GPIB driver for CEC PCI and PCMCIA boards");
* GPIB interrupt service routines
*/
-irqreturn_t cec_interrupt(int irq, void *arg)
+static irqreturn_t cec_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct cec_priv *priv = board->private_data;
unsigned long flags;
irqreturn_t retval;
@@ -36,146 +40,148 @@ irqreturn_t cec_interrupt(int irq, void *arg)
#define CEC_DEV_ID 0x5cec
#define CEC_SUBID 0x9050
-static int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config);
+static int cec_pci_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static void cec_pci_detach(gpib_board_t *board);
+static void cec_pci_detach(struct gpib_board *board);
// wrappers for interface functions
-int cec_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+static int cec_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ size_t *bytes_read)
{
struct cec_priv *priv = board->private_data;
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-int cec_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int cec_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct cec_priv *priv = board->private_data;
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-int cec_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int cec_command(struct gpib_board *board, uint8_t *buffer,
+ size_t length, size_t *bytes_written)
{
struct cec_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-int cec_take_control(gpib_board_t *board, int synchronous)
+static int cec_take_control(struct gpib_board *board, int synchronous)
{
struct cec_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-int cec_go_to_standby(gpib_board_t *board)
+static int cec_go_to_standby(struct gpib_board *board)
{
struct cec_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-void cec_request_system_control(gpib_board_t *board, int request_control)
+static void cec_request_system_control(struct gpib_board *board, int request_control)
{
struct cec_priv *priv = board->private_data;
nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
}
-void cec_interface_clear(gpib_board_t *board, int assert)
+static void cec_interface_clear(struct gpib_board *board, int assert)
{
struct cec_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-void cec_remote_enable(gpib_board_t *board, int enable)
+static void cec_remote_enable(struct gpib_board *board, int enable)
{
struct cec_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-int cec_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int cec_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct cec_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-void cec_disable_eos(gpib_board_t *board)
+static void cec_disable_eos(struct gpib_board *board)
{
struct cec_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-unsigned int cec_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int cec_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct cec_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-int cec_primary_address(gpib_board_t *board, unsigned int address)
+static int cec_primary_address(struct gpib_board *board, unsigned int address)
{
struct cec_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-int cec_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int cec_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct cec_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-int cec_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int cec_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct cec_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-void cec_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void cec_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct cec_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, config);
}
-void cec_parallel_poll_response(gpib_board_t *board, int ist)
+static void cec_parallel_poll_response(struct gpib_board *board, int ist)
{
struct cec_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-void cec_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void cec_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct cec_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static uint8_t cec_serial_poll_status(gpib_board_t *board)
+static uint8_t cec_serial_poll_status(struct gpib_board *board)
{
struct cec_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-static unsigned int cec_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int cec_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct cec_priv *priv = board->private_data;
return nec7210_t1_delay(board, &priv->nec7210_priv, nano_sec);
}
-void cec_return_to_local(gpib_board_t *board)
+static void cec_return_to_local(struct gpib_board *board)
{
struct cec_priv *priv = board->private_data;
@@ -210,7 +216,7 @@ static gpib_interface_t cec_pci_interface = {
.return_to_local = cec_return_to_local,
};
-static int cec_allocate_private(gpib_board_t *board)
+static int cec_allocate_private(struct gpib_board *board)
{
struct cec_priv *priv;
@@ -223,13 +229,13 @@ static int cec_allocate_private(gpib_board_t *board)
return 0;
}
-void cec_free_private(gpib_board_t *board)
+static void cec_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-int cec_generic_attach(gpib_board_t *board)
+static int cec_generic_attach(struct gpib_board *board)
{
struct cec_priv *cec_priv;
struct nec7210_priv *nec_priv;
@@ -247,7 +253,7 @@ int cec_generic_attach(gpib_board_t *board)
return 0;
}
-void cec_init(struct cec_priv *cec_priv, const gpib_board_t *board)
+static void cec_init(struct cec_priv *cec_priv, const struct gpib_board *board)
{
struct nec7210_priv *nec_priv = &cec_priv->nec7210_priv;
@@ -259,7 +265,7 @@ void cec_init(struct cec_priv *cec_priv, const gpib_board_t *board)
nec7210_board_online(nec_priv, board);
}
-int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int cec_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct cec_priv *cec_priv;
struct nec7210_priv *nec_priv;
@@ -283,31 +289,29 @@ int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
break;
}
if (!cec_priv->pci_device) {
- pr_err("gpib: no cec PCI board found\n");
- return -1;
+ dev_err(board->gpib_dev, "no cec PCI board found\n");
+ return -ENODEV;
}
if (pci_enable_device(cec_priv->pci_device)) {
- pr_err("error enabling pci device\n");
- return -1;
+ dev_err(board->gpib_dev, "error enabling pci device\n");
+ return -EIO;
}
if (pci_request_regions(cec_priv->pci_device, "cec-gpib"))
- return -1;
+ return -EBUSY;
cec_priv->plx_iobase = pci_resource_start(cec_priv->pci_device, 1);
- pr_info(" plx9050 base address 0x%lx\n", cec_priv->plx_iobase);
- nec_priv->iobase = pci_resource_start(cec_priv->pci_device, 3);
- pr_info(" nec7210 base address 0x%x\n", nec_priv->iobase);
+ nec_priv->iobase = pci_resource_start(cec_priv->pci_device, 3);
isr_flags |= IRQF_SHARED;
- if (request_irq(cec_priv->pci_device->irq, cec_interrupt, isr_flags, "pci-gpib", board)) {
- pr_err("gpib: can't request IRQ %d\n", cec_priv->pci_device->irq);
- return -1;
+ if (request_irq(cec_priv->pci_device->irq, cec_interrupt, isr_flags, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to obtain IRQ %d\n", cec_priv->pci_device->irq);
+ return -EBUSY;
}
cec_priv->irq = cec_priv->pci_device->irq;
if (gpib_request_pseudo_irq(board, cec_interrupt)) {
- pr_err("cec: failed to allocate pseudo irq\n");
+ dev_err(board->gpib_dev, "failed to allocate pseudo irq\n");
return -1;
}
cec_init(cec_priv, board);
@@ -319,7 +323,7 @@ int cec_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void cec_pci_detach(gpib_board_t *board)
+static void cec_pci_detach(struct gpib_board *board)
{
struct cec_priv *cec_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -354,7 +358,7 @@ static const struct pci_device_id cec_pci_table[] = {
MODULE_DEVICE_TABLE(pci, cec_pci_table);
static struct pci_driver cec_pci_driver = {
- .name = "cec_gpib",
+ .name = DRV_NAME,
.id_table = cec_pci_table,
.probe = &cec_pci_probe
};
@@ -365,13 +369,13 @@ static int __init cec_init_module(void)
result = pci_register_driver(&cec_pci_driver);
if (result) {
- pr_err("cec_gpib: pci_register_driver failed: error = %d\n", result);
+ pr_err("pci_register_driver failed: error = %d\n", result);
return result;
}
result = gpib_register_driver(&cec_pci_interface, THIS_MODULE);
if (result) {
- pr_err("cec_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
return result;
}
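Both cec_gpib.c above and gpib_os.c below define pr_fmt()/dev_fmt() before their includes, so every pr_*() and dev_*() call is prefixed with the module name automatically; that is why the hand-written "cec_gpib:" and "gpib:" prefixes disappear from the format strings. A minimal sketch of the mechanism (the function and message are made up for illustration):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/device.h>

static void example_log(struct device *dev)
{
	/* expands to pr_err(KBUILD_MODNAME ": something went wrong\n") */
	pr_err("something went wrong\n");
	/* dev_err() output picks up the same KBUILD_MODNAME prefix via dev_fmt */
	dev_err(dev, "something went wrong\n");
}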
diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/staging/gpib/common/gpib_os.c
index 4901e660242e..cb77fe0a4b9a 100644
--- a/drivers/staging/gpib/common/gpib_os.c
+++ b/drivers/staging/gpib/common/gpib_os.c
@@ -5,6 +5,9 @@
***************************************************************************
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+
#include "ibsys.h"
#include <linux/module.h>
#include <linux/wait.h>
@@ -23,53 +26,53 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB base support");
MODULE_ALIAS_CHARDEV_MAJOR(GPIB_CODE);
-static int board_type_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board, unsigned long arg);
-static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int board_type_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board, unsigned long arg);
+static int read_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg);
-static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int write_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg);
-static int command_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int command_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg);
-static int open_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long arg);
-static int close_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long arg);
-static int serial_poll_ioctl(gpib_board_t *board, unsigned long arg);
-static int wait_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board, unsigned long arg);
-static int parallel_poll_ioctl(gpib_board_t *board, unsigned long arg);
-static int online_ioctl(gpib_board_t *board, unsigned long arg);
-static int remote_enable_ioctl(gpib_board_t *board, unsigned long arg);
-static int take_control_ioctl(gpib_board_t *board, unsigned long arg);
-static int line_status_ioctl(gpib_board_t *board, unsigned long arg);
-static int pad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int open_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg);
+static int close_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg);
+static int serial_poll_ioctl(struct gpib_board *board, unsigned long arg);
+static int wait_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board, unsigned long arg);
+static int parallel_poll_ioctl(struct gpib_board *board, unsigned long arg);
+static int online_ioctl(struct gpib_board *board, unsigned long arg);
+static int remote_enable_ioctl(struct gpib_board *board, unsigned long arg);
+static int take_control_ioctl(struct gpib_board *board, unsigned long arg);
+static int line_status_ioctl(struct gpib_board *board, unsigned long arg);
+static int pad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg);
-static int sad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int sad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg);
-static int eos_ioctl(gpib_board_t *board, unsigned long arg);
-static int request_service_ioctl(gpib_board_t *board, unsigned long arg);
-static int request_service2_ioctl(gpib_board_t *board, unsigned long arg);
+static int eos_ioctl(struct gpib_board *board, unsigned long arg);
+static int request_service_ioctl(struct gpib_board *board, unsigned long arg);
+static int request_service2_ioctl(struct gpib_board *board, unsigned long arg);
static int iobase_ioctl(gpib_board_config_t *config, unsigned long arg);
static int irq_ioctl(gpib_board_config_t *config, unsigned long arg);
static int dma_ioctl(gpib_board_config_t *config, unsigned long arg);
-static int autospoll_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int autospoll_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg);
-static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int mutex_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg);
-static int timeout_ioctl(gpib_board_t *board, unsigned long arg);
-static int status_bytes_ioctl(gpib_board_t *board, unsigned long arg);
-static int board_info_ioctl(const gpib_board_t *board, unsigned long arg);
-static int ppc_ioctl(gpib_board_t *board, unsigned long arg);
-static int set_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg);
-static int get_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg);
-static int query_board_rsv_ioctl(gpib_board_t *board, unsigned long arg);
-static int interface_clear_ioctl(gpib_board_t *board, unsigned long arg);
+static int timeout_ioctl(struct gpib_board *board, unsigned long arg);
+static int status_bytes_ioctl(struct gpib_board *board, unsigned long arg);
+static int board_info_ioctl(const struct gpib_board *board, unsigned long arg);
+static int ppc_ioctl(struct gpib_board *board, unsigned long arg);
+static int set_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg);
+static int get_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg);
+static int query_board_rsv_ioctl(struct gpib_board *board, unsigned long arg);
+static int interface_clear_ioctl(struct gpib_board *board, unsigned long arg);
static int select_pci_ioctl(gpib_board_config_t *config, unsigned long arg);
static int select_device_path_ioctl(gpib_board_config_t *config, unsigned long arg);
-static int event_ioctl(gpib_board_t *board, unsigned long arg);
-static int request_system_control_ioctl(gpib_board_t *board, unsigned long arg);
-static int t1_delay_ioctl(gpib_board_t *board, unsigned long arg);
+static int event_ioctl(struct gpib_board *board, unsigned long arg);
+static int request_system_control_ioctl(struct gpib_board *board, unsigned long arg);
+static int t1_delay_ioctl(struct gpib_board *board, unsigned long arg);
-static int cleanup_open_devices(gpib_file_private_t *file_priv, gpib_board_t *board);
+static int cleanup_open_devices(gpib_file_private_t *file_priv, struct gpib_board *board);
-static int pop_gpib_event_nolock(gpib_board_t *board, gpib_event_queue_t *queue, short *event_type);
+static int pop_gpib_event_nolock(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type);
/*
* Timer functions
@@ -79,18 +82,18 @@ static int pop_gpib_event_nolock(gpib_board_t *board, gpib_event_queue_t *queue,
static void watchdog_timeout(struct timer_list *t)
{
- gpib_board_t *board = from_timer(board, t, timer);
+ struct gpib_board *board = from_timer(board, t, timer);
set_bit(TIMO_NUM, &board->status);
wake_up_interruptible(&board->wait);
}
/* install timer interrupt handler */
-void os_start_timer(gpib_board_t *board, unsigned int usec_timeout)
+void os_start_timer(struct gpib_board *board, unsigned int usec_timeout)
/* Starts the timeout task */
{
if (timer_pending(&board->timer)) {
- pr_err("gpib: bug! timer already running?\n");
+ dev_err(board->gpib_dev, "bug! timer already running?\n");
return;
}
clear_bit(TIMO_NUM, &board->status);
@@ -102,14 +105,14 @@ void os_start_timer(gpib_board_t *board, unsigned int usec_timeout)
}
}
-void os_remove_timer(gpib_board_t *board)
+void os_remove_timer(struct gpib_board *board)
/* Removes the timeout task */
{
if (timer_pending(&board->timer))
del_timer_sync(&board->timer);
}
-int io_timed_out(gpib_board_t *board)
+int io_timed_out(struct gpib_board *board)
{
if (test_bit(TIMO_NUM, &board->status))
return 1;
@@ -137,10 +140,10 @@ static void pseudo_irq_handler(struct timer_list *t)
mod_timer(&pseudo_irq->timer, jiffies + pseudo_irq_period());
}
-int gpib_request_pseudo_irq(gpib_board_t *board, irqreturn_t (*handler)(int, void *))
+int gpib_request_pseudo_irq(struct gpib_board *board, irqreturn_t (*handler)(int, void *))
{
if (timer_pending(&board->pseudo_irq.timer) || board->pseudo_irq.handler) {
- pr_err("gpib: only one pseudo interrupt per board allowed\n");
+ dev_err(board->gpib_dev, "only one pseudo interrupt per board allowed\n");
return -1;
}
@@ -156,7 +159,7 @@ int gpib_request_pseudo_irq(gpib_board_t *board, irqreturn_t (*handler)(int, voi
}
EXPORT_SYMBOL(gpib_request_pseudo_irq);
-void gpib_free_pseudo_irq(gpib_board_t *board)
+void gpib_free_pseudo_irq(struct gpib_board *board)
{
atomic_set(&board->pseudo_irq.active, 0);
@@ -175,7 +178,7 @@ unsigned int num_status_bytes(const gpib_status_queue_t *dev)
}
// push status byte onto back of status byte fifo
-int push_status_byte(gpib_board_t *board, gpib_status_queue_t *device, u8 poll_byte)
+int push_status_byte(struct gpib_board *board, gpib_status_queue_t *device, u8 poll_byte)
{
struct list_head *head = &device->status_bytes;
status_byte_t *status;
@@ -209,7 +212,7 @@ int push_status_byte(gpib_board_t *board, gpib_status_queue_t *device, u8 poll_b
}
// pop status byte from front of status byte fifo
-int pop_status_byte(gpib_board_t *board, gpib_status_queue_t *device, u8 *poll_byte)
+int pop_status_byte(struct gpib_board *board, gpib_status_queue_t *device, u8 *poll_byte)
{
struct list_head *head = &device->status_bytes;
struct list_head *front = head->next;
@@ -240,7 +243,7 @@ int pop_status_byte(gpib_board_t *board, gpib_status_queue_t *device, u8 *poll_b
return 0;
}
-gpib_status_queue_t *get_gpib_status_queue(gpib_board_t *board, unsigned int pad, int sad)
+gpib_status_queue_t *get_gpib_status_queue(struct gpib_board *board, unsigned int pad, int sad)
{
gpib_status_queue_t *device;
struct list_head *list_ptr;
@@ -255,13 +258,11 @@ gpib_status_queue_t *get_gpib_status_queue(gpib_board_t *board, unsigned int pad
return NULL;
}
-int get_serial_poll_byte(gpib_board_t *board, unsigned int pad, int sad, unsigned int usec_timeout,
+int get_serial_poll_byte(struct gpib_board *board, unsigned int pad, int sad, unsigned int usec_timeout,
uint8_t *poll_byte)
{
gpib_status_queue_t *device;
- dev_dbg(board->gpib_dev, "%s:()\n", __func__);
-
device = get_gpib_status_queue(board, pad, sad);
if (num_status_bytes(device))
return pop_status_byte(board, device, poll_byte);
@@ -269,11 +270,10 @@ int get_serial_poll_byte(gpib_board_t *board, unsigned int pad, int sad, unsigne
return dvrsp(board, pad, sad, usec_timeout, poll_byte);
}
-int autopoll_all_devices(gpib_board_t *board)
+int autopoll_all_devices(struct gpib_board *board)
{
int retval;
- dev_dbg(board->gpib_dev, "entering %s()\n", __func__);
if (mutex_lock_interruptible(&board->user_mutex))
return -ERESTARTSYS;
if (mutex_lock_interruptible(&board->big_gpib_mutex)) {
@@ -290,7 +290,7 @@ int autopoll_all_devices(gpib_board_t *board)
return retval;
}
- dev_dbg(board->gpib_dev, "%s complete\n", __func__);
+ dev_dbg(board->gpib_dev, "complete\n");
/* need to wake wait queue in case someone is
* waiting on RQS
*/
@@ -301,15 +301,13 @@ int autopoll_all_devices(gpib_board_t *board)
return retval;
}
-static int setup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
+static int setup_serial_poll(struct gpib_board *board, unsigned int usec_timeout)
{
u8 cmd_string[8];
int i;
size_t bytes_written;
int ret;
- dev_dbg(board->gpib_dev, "entering %s()\n", __func__);
-
os_start_timer(board, usec_timeout);
ret = ibcac(board, 1, 1);
if (ret < 0) {
@@ -326,7 +324,7 @@ static int setup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
ret = board->interface->command(board, cmd_string, i, &bytes_written);
if (ret < 0 || bytes_written < i) {
- pr_err("gpib: failed to setup serial poll\n");
+ dev_dbg(board->gpib_dev, "failed to setup serial poll\n");
os_remove_timer(board);
return -EIO;
}
@@ -335,7 +333,7 @@ static int setup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
return 0;
}
-static int read_serial_poll_byte(gpib_board_t *board, unsigned int pad,
+static int read_serial_poll_byte(struct gpib_board *board, unsigned int pad,
int sad, unsigned int usec_timeout, uint8_t *result)
{
u8 cmd_string[8];
@@ -344,7 +342,7 @@ static int read_serial_poll_byte(gpib_board_t *board, unsigned int pad,
int i;
size_t nbytes;
- dev_dbg(board->gpib_dev, "entering %s(), pad=%i sad=%i\n", __func__, pad, sad);
+ dev_dbg(board->gpib_dev, "entering pad=%i sad=%i\n", pad, sad);
os_start_timer(board, usec_timeout);
ret = ibcac(board, 1, 1);
@@ -361,7 +359,7 @@ static int read_serial_poll_byte(gpib_board_t *board, unsigned int pad,
ret = board->interface->command(board, cmd_string, i, &nbytes);
if (ret < 0 || nbytes < i) {
- pr_err("gpib: failed to setup serial poll\n");
+ dev_err(board->gpib_dev, "failed to setup serial poll\n");
os_remove_timer(board);
return -EIO;
}
@@ -371,7 +369,7 @@ static int read_serial_poll_byte(gpib_board_t *board, unsigned int pad,
// read poll result
ret = board->interface->read(board, result, 1, &end_flag, &nbytes);
if (ret < 0 || nbytes < 1) {
- pr_err("gpib: serial poll failed\n");
+ dev_err(board->gpib_dev, "serial poll failed\n");
os_remove_timer(board);
return -EIO;
}
@@ -380,14 +378,12 @@ static int read_serial_poll_byte(gpib_board_t *board, unsigned int pad,
return 0;
}
-static int cleanup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
+static int cleanup_serial_poll(struct gpib_board *board, unsigned int usec_timeout)
{
u8 cmd_string[8];
int ret;
size_t bytes_written;
- dev_dbg(board->gpib_dev, "entering %s()\n", __func__);
-
os_start_timer(board, usec_timeout);
ret = ibcac(board, 1, 1);
if (ret < 0) {
@@ -399,7 +395,7 @@ static int cleanup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
cmd_string[1] = UNT;
ret = board->interface->command(board, cmd_string, 2, &bytes_written);
if (ret < 0 || bytes_written < 2) {
- pr_err("gpib: failed to disable serial poll\n");
+ dev_err(board->gpib_dev, "failed to disable serial poll\n");
os_remove_timer(board);
return -EIO;
}
@@ -408,7 +404,7 @@ static int cleanup_serial_poll(gpib_board_t *board, unsigned int usec_timeout)
return 0;
}
-static int serial_poll_single(gpib_board_t *board, unsigned int pad, int sad,
+static int serial_poll_single(struct gpib_board *board, unsigned int pad, int sad,
unsigned int usec_timeout, uint8_t *result)
{
int retval, cleanup_retval;
@@ -426,7 +422,7 @@ static int serial_poll_single(gpib_board_t *board, unsigned int pad, int sad,
return 0;
}
-int serial_poll_all(gpib_board_t *board, unsigned int usec_timeout)
+int serial_poll_all(struct gpib_board *board, unsigned int usec_timeout)
{
int retval = 0;
struct list_head *cur;
@@ -435,8 +431,6 @@ int serial_poll_all(gpib_board_t *board, unsigned int usec_timeout)
u8 result;
unsigned int num_bytes = 0;
- dev_dbg(board->gpib_dev, "entering %s()\n", __func__);
-
head = &board->device_list;
if (head->next == head)
return 0;
@@ -475,19 +469,19 @@ int serial_poll_all(gpib_board_t *board, unsigned int usec_timeout)
* SPD and UNT are sent at the completion of the poll.
*/
-int dvrsp(gpib_board_t *board, unsigned int pad, int sad,
+int dvrsp(struct gpib_board *board, unsigned int pad, int sad,
unsigned int usec_timeout, uint8_t *result)
{
int status = ibstatus(board);
int retval;
if ((status & CIC) == 0) {
- pr_err("gpib: not CIC during serial poll\n");
+ dev_err(board->gpib_dev, "not CIC during serial poll\n");
return -1;
}
if (pad > MAX_GPIB_PRIMARY_ADDRESS || sad > MAX_GPIB_SECONDARY_ADDRESS || sad < -1) {
- pr_err("gpib: bad address for serial poll");
+ dev_err(board->gpib_dev, "bad address for serial poll");
return -1;
}
@@ -527,7 +521,7 @@ static int init_gpib_file_private(gpib_file_private_t *priv)
int ibopen(struct inode *inode, struct file *filep)
{
unsigned int minor = iminor(inode);
- gpib_board_t *board;
+ struct gpib_board *board;
gpib_file_private_t *priv;
if (minor >= GPIB_MAX_NUM_BOARDS) {
@@ -544,20 +538,16 @@ int ibopen(struct inode *inode, struct file *filep)
priv = filep->private_data;
init_gpib_file_private((gpib_file_private_t *)filep->private_data);
- dev_dbg(board->gpib_dev, "pid %i, gpib: opening minor %d\n", current->pid, minor);
-
if (board->use_count == 0) {
int retval;
retval = request_module("gpib%i", minor);
- if (retval) {
- dev_dbg(board->gpib_dev, "pid %i, gpib: request module returned %i\n",
- current->pid, retval);
- }
+ if (retval)
+ dev_dbg(board->gpib_dev, "request module returned %i\n", retval);
}
if (board->interface) {
if (!try_module_get(board->provider_module)) {
- pr_err("gpib: try_module_get() failed\n");
+ dev_err(board->gpib_dev, "try_module_get() failed\n");
return -EIO;
}
board->use_count++;
@@ -569,7 +559,7 @@ int ibopen(struct inode *inode, struct file *filep)
int ibclose(struct inode *inode, struct file *filep)
{
unsigned int minor = iminor(inode);
- gpib_board_t *board;
+ struct gpib_board *board;
gpib_file_private_t *priv = filep->private_data;
gpib_descriptor_t *desc;
@@ -580,21 +570,19 @@ int ibclose(struct inode *inode, struct file *filep)
board = &board_array[minor];
- dev_dbg(board->gpib_dev, "pid %i, closing minor %d\n", current->pid, minor);
-
if (priv) {
desc = handle_to_descriptor(priv, 0);
if (desc) {
if (desc->autopoll_enabled) {
- dev_dbg(board->gpib_dev, "pid %i, decrementing autospollers\n",
- current->pid);
+ dev_dbg(board->gpib_dev, "decrementing autospollers\n");
if (board->autospollers > 0)
board->autospollers--;
else
- pr_err("gpib: Attempt to decrement zero autospollers\n");
+ dev_err(board->gpib_dev,
+ "Attempt to decrement zero autospollers\n");
}
} else {
- pr_err("gpib: Unexpected null gpib_descriptor\n");
+ dev_err(board->gpib_dev, "Unexpected null gpib_descriptor\n");
}
cleanup_open_devices(priv, board);
@@ -617,7 +605,7 @@ int ibclose(struct inode *inode, struct file *filep)
long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
unsigned int minor = iminor(filep->f_path.dentry->d_inode);
- gpib_board_t *board;
+ struct gpib_board *board;
gpib_file_private_t *file_priv = filep->private_data;
long retval = -ENOTTY;
@@ -630,8 +618,8 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if (mutex_lock_interruptible(&board->big_gpib_mutex))
return -ERESTARTSYS;
- dev_dbg(board->gpib_dev, "pid %i, ioctl %d, interface=%s, use=%d, onl=%d\n",
- current->pid, cmd & 0xff,
+ dev_dbg(board->gpib_dev, "ioctl %d, interface=%s, use=%d, onl=%d\n",
+ cmd & 0xff,
board->interface ? board->interface->name : "",
board->use_count,
board->online);
@@ -647,13 +635,13 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
break;
}
if (!board->interface) {
- pr_err("gpib: no gpib board configured on /dev/gpib%i\n", minor);
+ dev_err(board->gpib_dev, "no gpib board configured\n");
retval = -ENODEV;
goto done;
}
if (file_priv->got_module == 0) {
if (!try_module_get(board->provider_module)) {
- pr_err("gpib: try_module_get() failed\n");
+ dev_err(board->gpib_dev, "try_module_get() failed\n");
retval = -EIO;
goto done;
}
@@ -699,8 +687,6 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
}
if (!board->online) {
- pr_err("gpib: ioctl %i invalid for offline board\n",
- cmd & 0xff);
retval = -EINVAL;
goto done;
}
@@ -737,8 +723,6 @@ long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
spin_lock(&board->locking_pid_spinlock);
if (current->pid != board->locking_pid) {
spin_unlock(&board->locking_pid_spinlock);
- pr_err("gpib: need to hold board lock to perform ioctl %i\n",
- cmd & 0xff);
retval = -EPERM;
goto done;
}
@@ -822,7 +806,7 @@ done:
return retval;
}
-static int board_type_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board, unsigned long arg)
+static int board_type_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board, unsigned long arg)
{
struct list_head *list_ptr;
board_type_ioctl_t cmd;
@@ -830,10 +814,8 @@ static int board_type_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (board->online) {
- pr_err("gpib: can't change board type while board is online.\n");
+ if (board->online)
return -EBUSY;
- }
retval = copy_from_user(&cmd, (void __user *)arg, sizeof(board_type_ioctl_t));
if (retval)
@@ -875,7 +857,7 @@ static int board_type_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
return -EINVAL;
}
-static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int read_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg)
{
read_write_ioctl_t read_cmd;
@@ -951,7 +933,7 @@ static int read_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
}
static int command_ioctl(gpib_file_private_t *file_priv,
- gpib_board_t *board, unsigned long arg)
+ struct gpib_board *board, unsigned long arg)
{
read_write_ioctl_t cmd;
u8 __user *userbuf;
@@ -1034,7 +1016,7 @@ static int command_ioctl(gpib_file_private_t *file_priv,
return retval;
}
-static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int write_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg)
{
read_write_ioctl_t write_cmd;
@@ -1105,7 +1087,7 @@ static int write_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
return retval;
}
-static int status_bytes_ioctl(gpib_board_t *board, unsigned long arg)
+static int status_bytes_ioctl(struct gpib_board *board, unsigned long arg)
{
gpib_status_queue_t *device;
spoll_bytes_ioctl_t cmd;
@@ -1128,7 +1110,7 @@ static int status_bytes_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int increment_open_device_count(gpib_board_t *board, struct list_head *head,
+static int increment_open_device_count(struct gpib_board *board, struct list_head *head,
unsigned int pad, int sad)
{
struct list_head *list_ptr;
@@ -1140,8 +1122,8 @@ static int increment_open_device_count(gpib_board_t *board, struct list_head *he
for (list_ptr = head->next; list_ptr != head; list_ptr = list_ptr->next) {
device = list_entry(list_ptr, gpib_status_queue_t, list);
if (gpib_address_equal(device->pad, device->sad, pad, sad)) {
- dev_dbg(board->gpib_dev, "pid %i, incrementing open count for pad %i, sad %i\n",
- current->pid, device->pad, device->sad);
+ dev_dbg(board->gpib_dev, "incrementing open count for pad %i, sad %i\n",
+ device->pad, device->sad);
device->reference_count++;
return 0;
}
@@ -1158,13 +1140,12 @@ static int increment_open_device_count(gpib_board_t *board, struct list_head *he
list_add(&device->list, head);
- dev_dbg(board->gpib_dev, "pid %i, opened pad %i, sad %i\n",
- current->pid, device->pad, device->sad);
+ dev_dbg(board->gpib_dev, "opened pad %i, sad %i\n", device->pad, device->sad);
return 0;
}
-static int subtract_open_device_count(gpib_board_t *board, struct list_head *head,
+static int subtract_open_device_count(struct gpib_board *board, struct list_head *head,
unsigned int pad, int sad, unsigned int count)
{
gpib_status_queue_t *device;
@@ -1173,33 +1154,33 @@ static int subtract_open_device_count(gpib_board_t *board, struct list_head *hea
for (list_ptr = head->next; list_ptr != head; list_ptr = list_ptr->next) {
device = list_entry(list_ptr, gpib_status_queue_t, list);
if (gpib_address_equal(device->pad, device->sad, pad, sad)) {
- dev_dbg(board->gpib_dev, "pid %i, decrementing open count for pad %i, sad %i\n",
- current->pid, device->pad, device->sad);
+ dev_dbg(board->gpib_dev, "decrementing open count for pad %i, sad %i\n",
+ device->pad, device->sad);
if (count > device->reference_count) {
- pr_err("gpib: bug! in %s()\n", __func__);
+ dev_err(board->gpib_dev, "bug! in %s()\n", __func__);
return -EINVAL;
}
device->reference_count -= count;
if (device->reference_count == 0) {
- dev_dbg(board->gpib_dev, "pid %i, closing pad %i, sad %i\n",
- current->pid, device->pad, device->sad);
+ dev_dbg(board->gpib_dev, "closing pad %i, sad %i\n",
+ device->pad, device->sad);
list_del(list_ptr);
kfree(device);
}
return 0;
}
}
- pr_err("gpib: bug! tried to close address that was never opened!\n");
+ dev_err(board->gpib_dev, "bug! tried to close address that was never opened!\n");
return -EINVAL;
}
-static inline int decrement_open_device_count(gpib_board_t *board, struct list_head *head,
+static inline int decrement_open_device_count(struct gpib_board *board, struct list_head *head,
unsigned int pad, int sad)
{
return subtract_open_device_count(board, head, pad, sad, 1);
}
-static int cleanup_open_devices(gpib_file_private_t *file_priv, gpib_board_t *board)
+static int cleanup_open_devices(gpib_file_private_t *file_priv, struct gpib_board *board)
{
int retval = 0;
int i;
@@ -1224,7 +1205,7 @@ static int cleanup_open_devices(gpib_file_private_t *file_priv, gpib_board_t *bo
return 0;
}
-static int open_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long arg)
+static int open_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg)
{
open_dev_ioctl_t open_dev_cmd;
int retval;
@@ -1274,7 +1255,7 @@ static int open_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long
return 0;
}
-static int close_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned long arg)
+static int close_dev_ioctl(struct file *filep, struct gpib_board *board, unsigned long arg)
{
close_dev_ioctl_t cmd;
gpib_file_private_t *file_priv = filep->private_data;
@@ -1301,13 +1282,11 @@ static int close_dev_ioctl(struct file *filep, gpib_board_t *board, unsigned lon
return 0;
}
-static int serial_poll_ioctl(gpib_board_t *board, unsigned long arg)
+static int serial_poll_ioctl(struct gpib_board *board, unsigned long arg)
{
serial_poll_ioctl_t serial_cmd;
int retval;
- dev_dbg(board->gpib_dev, "pid %i, entering %s()\n", current->pid, __func__);
-
retval = copy_from_user(&serial_cmd, (void __user *)arg, sizeof(serial_cmd));
if (retval)
return -EFAULT;
@@ -1324,7 +1303,7 @@ static int serial_poll_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int wait_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
+static int wait_ioctl(gpib_file_private_t *file_priv, struct gpib_board *board,
unsigned long arg)
{
wait_ioctl_t wait_cmd;
@@ -1351,7 +1330,7 @@ static int wait_ioctl(gpib_file_private_t *file_priv, gpib_board_t *board,
return 0;
}
-static int parallel_poll_ioctl(gpib_board_t *board, unsigned long arg)
+static int parallel_poll_ioctl(struct gpib_board *board, unsigned long arg)
{
u8 poll_byte;
int retval;
@@ -1367,7 +1346,7 @@ static int parallel_poll_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int online_ioctl(gpib_board_t *board, unsigned long arg)
+static int online_ioctl(struct gpib_board *board, unsigned long arg)
{
online_ioctl_t online_cmd;
int retval;
@@ -1411,7 +1390,7 @@ static int online_ioctl(gpib_board_t *board, unsigned long arg)
return retval;
}
-static int remote_enable_ioctl(gpib_board_t *board, unsigned long arg)
+static int remote_enable_ioctl(struct gpib_board *board, unsigned long arg)
{
int enable;
int retval;
@@ -1423,7 +1402,7 @@ static int remote_enable_ioctl(gpib_board_t *board, unsigned long arg)
return ibsre(board, enable);
}
-static int take_control_ioctl(gpib_board_t *board, unsigned long arg)
+static int take_control_ioctl(struct gpib_board *board, unsigned long arg)
{
int synchronous;
int retval;
@@ -1435,7 +1414,7 @@ static int take_control_ioctl(gpib_board_t *board, unsigned long arg)
return ibcac(board, synchronous, 1);
}
-static int line_status_ioctl(gpib_board_t *board, unsigned long arg)
+static int line_status_ioctl(struct gpib_board *board, unsigned long arg)
{
short lines;
int retval;
@@ -1451,7 +1430,7 @@ static int line_status_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int pad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int pad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg)
{
pad_ioctl_t cmd;
@@ -1487,7 +1466,7 @@ static int pad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
return 0;
}
-static int sad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int sad_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg)
{
sad_ioctl_t cmd;
@@ -1522,7 +1501,7 @@ static int sad_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
return 0;
}
-static int eos_ioctl(gpib_board_t *board, unsigned long arg)
+static int eos_ioctl(struct gpib_board *board, unsigned long arg)
{
eos_ioctl_t eos_cmd;
int retval;
@@ -1534,7 +1513,7 @@ static int eos_ioctl(gpib_board_t *board, unsigned long arg)
return ibeos(board, eos_cmd.eos, eos_cmd.eos_flags);
}
-static int request_service_ioctl(gpib_board_t *board, unsigned long arg)
+static int request_service_ioctl(struct gpib_board *board, unsigned long arg)
{
u8 status_byte;
int retval;
@@ -1546,7 +1525,7 @@ static int request_service_ioctl(gpib_board_t *board, unsigned long arg)
return ibrsv2(board, status_byte, status_byte & request_service_bit);
}
-static int request_service2_ioctl(gpib_board_t *board, unsigned long arg)
+static int request_service2_ioctl(struct gpib_board *board, unsigned long arg)
{
request_service2_t request_service2_cmd;
int retval;
@@ -1613,7 +1592,7 @@ static int dma_ioctl(gpib_board_config_t *config, unsigned long arg)
return 0;
}
-static int autospoll_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int autospoll_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg)
{
autospoll_ioctl_t enable;
@@ -1639,18 +1618,19 @@ static int autospoll_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
board->autospollers--;
retval = 0;
} else {
- pr_err("gpib: tried to set number of autospollers negative\n");
+ dev_err(board->gpib_dev,
+ "tried to set number of autospollers negative\n");
retval = -EINVAL;
}
} else {
- pr_err("gpib: autopoll disable requested before enable\n");
+ dev_err(board->gpib_dev, "autopoll disable requested before enable\n");
retval = -EINVAL;
}
}
return retval;
}
-static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
+static int mutex_ioctl(struct gpib_board *board, gpib_file_private_t *file_priv,
unsigned long arg)
{
int retval, lock_mutex;
@@ -1661,10 +1641,8 @@ static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
if (lock_mutex) {
retval = mutex_lock_interruptible(&board->user_mutex);
- if (retval) {
- pr_warn("gpib: ioctl interrupted while waiting on lock\n");
+ if (retval)
return -ERESTARTSYS;
- }
spin_lock(&board->locking_pid_spinlock);
board->locking_pid = current->pid;
@@ -1672,13 +1650,12 @@ static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
atomic_set(&file_priv->holding_mutex, 1);
- dev_dbg(board->gpib_dev, "pid %i, locked board %d mutex\n",
- current->pid, board->minor);
+ dev_dbg(board->gpib_dev, "locked board mutex\n");
} else {
spin_lock(&board->locking_pid_spinlock);
if (current->pid != board->locking_pid) {
- pr_err("gpib: bug! pid %i tried to release mutex held by pid %i\n",
- current->pid, board->locking_pid);
+ dev_err(board->gpib_dev, "bug! pid %i tried to release mutex held by pid %i\n",
+ current->pid, board->locking_pid);
spin_unlock(&board->locking_pid_spinlock);
return -EPERM;
}
@@ -1688,13 +1665,12 @@ static int mutex_ioctl(gpib_board_t *board, gpib_file_private_t *file_priv,
atomic_set(&file_priv->holding_mutex, 0);
mutex_unlock(&board->user_mutex);
- dev_dbg(board->gpib_dev, "pid %i, unlocked board %i mutex\n",
- current->pid, board->minor);
+ dev_dbg(board->gpib_dev, "unlocked board mutex\n");
}
return 0;
}
-static int timeout_ioctl(gpib_board_t *board, unsigned long arg)
+static int timeout_ioctl(struct gpib_board *board, unsigned long arg)
{
unsigned int timeout;
int retval;
@@ -1704,12 +1680,12 @@ static int timeout_ioctl(gpib_board_t *board, unsigned long arg)
return -EFAULT;
board->usec_timeout = timeout;
- dev_dbg(board->gpib_dev, "pid %i, timeout set to %i usec\n", current->pid, timeout);
+ dev_dbg(board->gpib_dev, "timeout set to %i usec\n", timeout);
return 0;
}
-static int ppc_ioctl(gpib_board_t *board, unsigned long arg)
+static int ppc_ioctl(struct gpib_board *board, unsigned long arg)
{
ppoll_config_ioctl_t cmd;
int retval;
@@ -1735,7 +1711,7 @@ static int ppc_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int set_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
+static int set_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg)
{
local_ppoll_mode_ioctl_t cmd;
int retval;
@@ -1744,17 +1720,15 @@ static int set_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
if (retval)
return -EFAULT;
- if (!board->interface->local_parallel_poll_mode) {
- pr_warn("gpib: local/remote parallel poll mode not supported by driver.");
- return -EIO;
- }
+ if (!board->interface->local_parallel_poll_mode)
+ return -ENOENT;
board->local_ppoll_mode = cmd != 0;
board->interface->local_parallel_poll_mode(board, board->local_ppoll_mode);
return 0;
}
-static int get_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
+static int get_local_ppoll_mode_ioctl(struct gpib_board *board, unsigned long arg)
{
local_ppoll_mode_ioctl_t cmd;
int retval;
@@ -1767,7 +1741,7 @@ static int get_local_ppoll_mode_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int query_board_rsv_ioctl(gpib_board_t *board, unsigned long arg)
+static int query_board_rsv_ioctl(struct gpib_board *board, unsigned long arg)
{
int status;
int retval;
@@ -1781,7 +1755,7 @@ static int query_board_rsv_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int board_info_ioctl(const gpib_board_t *board, unsigned long arg)
+static int board_info_ioctl(const struct gpib_board *board, unsigned long arg)
{
board_info_ioctl_t info;
int retval;
@@ -1804,7 +1778,7 @@ static int board_info_ioctl(const gpib_board_t *board, unsigned long arg)
return 0;
}
-static int interface_clear_ioctl(gpib_board_t *board, unsigned long arg)
+static int interface_clear_ioctl(struct gpib_board *board, unsigned long arg)
{
unsigned int usec_duration;
int retval;
@@ -1867,7 +1841,7 @@ unsigned int num_gpib_events(const gpib_event_queue_t *queue)
return queue->num_events;
}
-static int push_gpib_event_nolock(gpib_board_t *board, short event_type)
+static int push_gpib_event_nolock(struct gpib_board *board, short event_type)
{
gpib_event_queue_t *queue = &board->event_queue;
struct list_head *head = &queue->event_head;
@@ -1887,7 +1861,7 @@ static int push_gpib_event_nolock(gpib_board_t *board, short event_type)
event = kmalloc(sizeof(gpib_event_t), GFP_ATOMIC);
if (!event) {
queue->dropped_event = 1;
- pr_err("gpib: failed to allocate memory for event\n");
+ dev_err(board->gpib_dev, "failed to allocate memory for event\n");
return -ENOMEM;
}
@@ -1905,7 +1879,7 @@ static int push_gpib_event_nolock(gpib_board_t *board, short event_type)
}
// push event onto back of event queue
-int push_gpib_event(gpib_board_t *board, short event_type)
+int push_gpib_event(struct gpib_board *board, short event_type)
{
unsigned long flags;
int retval;
@@ -1923,7 +1897,7 @@ int push_gpib_event(gpib_board_t *board, short event_type)
}
EXPORT_SYMBOL(push_gpib_event);
-static int pop_gpib_event_nolock(gpib_board_t *board, gpib_event_queue_t *queue, short *event_type)
+static int pop_gpib_event_nolock(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type)
{
struct list_head *head = &queue->event_head;
struct list_head *front = head->next;
@@ -1957,7 +1931,7 @@ static int pop_gpib_event_nolock(gpib_board_t *board, gpib_event_queue_t *queue,
}
// pop event from front of event queue
-int pop_gpib_event(gpib_board_t *board, gpib_event_queue_t *queue, short *event_type)
+int pop_gpib_event(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type)
{
unsigned long flags;
int retval;
@@ -1968,7 +1942,7 @@ int pop_gpib_event(gpib_board_t *board, gpib_event_queue_t *queue, short *event_
return retval;
}
-static int event_ioctl(gpib_board_t *board, unsigned long arg)
+static int event_ioctl(struct gpib_board *board, unsigned long arg)
{
event_ioctl_t user_event;
int retval;
@@ -1987,7 +1961,7 @@ static int event_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int request_system_control_ioctl(gpib_board_t *board, unsigned long arg)
+static int request_system_control_ioctl(struct gpib_board *board, unsigned long arg)
{
rsc_ioctl_t request_control;
int retval;
@@ -2001,16 +1975,14 @@ static int request_system_control_ioctl(gpib_board_t *board, unsigned long arg)
return 0;
}
-static int t1_delay_ioctl(gpib_board_t *board, unsigned long arg)
+static int t1_delay_ioctl(struct gpib_board *board, unsigned long arg)
{
t1_delay_ioctl_t cmd;
unsigned int delay;
int retval;
- if (!board->interface->t1_delay) {
- pr_warn("gpib: t1 delay not implemented in driver!\n");
- return -EIO;
- }
+ if (!board->interface->t1_delay)
+ return -ENOENT;
retval = copy_from_user(&cmd, (void __user *)arg, sizeof(cmd));
if (retval)
@@ -2018,8 +1990,11 @@ static int t1_delay_ioctl(gpib_board_t *board, unsigned long arg)
delay = cmd;
- board->t1_nano_sec = board->interface->t1_delay(board, delay);
+ retval = board->interface->t1_delay(board, delay);
+ if (retval < 0)
+ return retval;
+ board->t1_nano_sec = retval;
return 0;
}
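
The hunk above changes the t1_delay contract: the driver callback now returns either the programmed delay or a negative errno, and the ioctl only caches the value on success. A minimal sketch of a callback following that convention; the function name and the 500 ns clamp are illustrative, not taken from any real driver:

static int example_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
	/* hypothetical hardware floor; real drivers clamp to chip-specific steps */
	if (nano_sec < 500)
		nano_sec = 500;

	/* ... program the delay into the chip here ... */

	return nano_sec;	/* the ioctl stores this in board->t1_nano_sec */
}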
@@ -2032,7 +2007,7 @@ static const struct file_operations ib_fops = {
.release = &ibclose,
};
-gpib_board_t board_array[GPIB_MAX_NUM_BOARDS];
+struct gpib_board board_array[GPIB_MAX_NUM_BOARDS];
LIST_HEAD(registered_drivers);
@@ -2067,7 +2042,7 @@ void gpib_unregister_driver(gpib_interface_t *interface)
struct list_head *list_ptr;
for (i = 0; i < GPIB_MAX_NUM_BOARDS; i++) {
- gpib_board_t *board = &board_array[i];
+ struct gpib_board *board = &board_array[i];
if (board->interface == interface) {
if (board->use_count > 0)
@@ -2087,7 +2062,6 @@ void gpib_unregister_driver(gpib_interface_t *interface)
kfree(entry);
}
}
- pr_info("gpib: unregistered %s interface\n", interface->name);
}
EXPORT_SYMBOL(gpib_unregister_driver);
@@ -2098,7 +2072,7 @@ static void init_gpib_board_config(gpib_board_config_t *config)
config->pci_slot = -1;
}
-void init_gpib_board(gpib_board_t *board)
+void init_gpib_board(struct gpib_board *board)
{
board->interface = NULL;
board->provider_module = NULL;
@@ -2133,7 +2107,7 @@ void init_gpib_board(gpib_board_t *board)
board->local_ppoll_mode = 0;
}
-int gpib_allocate_board(gpib_board_t *board)
+int gpib_allocate_board(struct gpib_board *board)
{
if (!board->buffer) {
board->buffer_length = 0x4000;
@@ -2146,7 +2120,7 @@ int gpib_allocate_board(gpib_board_t *board)
return 0;
}
-void gpib_deallocate_board(gpib_board_t *board)
+void gpib_deallocate_board(struct gpib_board *board)
{
short dummy;
@@ -2159,7 +2133,7 @@ void gpib_deallocate_board(gpib_board_t *board)
pop_gpib_event(board, &board->event_queue, &dummy);
}
-static void init_board_array(gpib_board_t *board_array, unsigned int length)
+static void init_board_array(struct gpib_board *board_array, unsigned int length)
{
int i;
@@ -2184,7 +2158,7 @@ static int __init gpib_common_init_module(void)
{
int i;
- pr_info("Linux-GPIB core driver\n");
+ pr_info("GPIB core driver\n");
init_board_array(board_array, GPIB_MAX_NUM_BOARDS);
if (register_chrdev(GPIB_CODE, "gpib", &ib_fops)) {
pr_err("gpib: can't get major %d\n", GPIB_CODE);
diff --git a/drivers/staging/gpib/common/iblib.c b/drivers/staging/gpib/common/iblib.c
index 5f6fa135f505..6cca8a49e839 100644
--- a/drivers/staging/gpib/common/iblib.c
+++ b/drivers/staging/gpib/common/iblib.c
@@ -4,6 +4,8 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define dev_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ibsys.h"
#include <linux/delay.h>
#include <linux/kthread.h>
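
The new dev_fmt() define is what lets the dev_*() call sites in this file drop their hand-written "gpib: " prefixes. A minimal sketch of the effect, assuming this file is built into the gpib_common module so KBUILD_MODNAME expands to "gpib_common":

static void example_report_stuck_srq(struct gpib_board *board)
{
	/* logged roughly as "<driver> <device>: gpib_common: stuck SRQ";
	 * the exact device prefix depends on how board->gpib_dev was registered
	 */
	dev_err(board->gpib_dev, "stuck SRQ\n");
}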
@@ -19,15 +21,13 @@
* If fallback_to_async is non-zero, try to take control asynchronously
* if synchronous attempt fails.
*/
-int ibcac(gpib_board_t *board, int sync, int fallback_to_async)
+int ibcac(struct gpib_board *board, int sync, int fallback_to_async)
{
int status = ibstatus(board);
int retval;
- if ((status & CIC) == 0) {
- pr_err("gpib: not CIC during %s()\n", __func__);
- return -1;
- }
+ if ((status & CIC) == 0)
+ return -EINVAL;
if (status & ATN)
return 0;
@@ -61,7 +61,7 @@ int ibcac(gpib_board_t *board, int sync, int fallback_to_async)
* set the skip_check_for_command_acceptors flag in their
* gpib_interface_struct to avoid useless overhead.
*/
-static int check_for_command_acceptors(gpib_board_t *board)
+static int check_for_command_acceptors(struct gpib_board *board)
{
int lines;
@@ -76,15 +76,8 @@ static int check_for_command_acceptors(gpib_board_t *board)
if (lines < 0)
return lines;
- if (lines & ValidATN) {
- if ((lines & BusATN) == 0) {
- pr_err("gpib: ATN not asserted in %s()?", __func__);
- return 0;
- }
- }
-
- if ((lines & ValidNRFD) && (lines & ValidNDAC)) {
- if ((lines & BusNRFD) == 0 && (lines & BusNDAC) == 0)
+ if ((lines & VALID_NRFD) && (lines & VALID_NDAC)) {
+ if ((lines & BUS_NRFD) == 0 && (lines & BUS_NDAC) == 0)
return -ENOTCONN;
}
@@ -103,7 +96,7 @@ static int check_for_command_acceptors(gpib_board_t *board)
* must be called to initialize the GPIB and enable
* the interface to leave the controller idle state.
*/
-int ibcmd(gpib_board_t *board, uint8_t *buf, size_t length, size_t *bytes_written)
+int ibcmd(struct gpib_board *board, uint8_t *buf, size_t length, size_t *bytes_written)
{
ssize_t ret = 0;
int status;
@@ -112,10 +105,8 @@ int ibcmd(gpib_board_t *board, uint8_t *buf, size_t length, size_t *bytes_writte
status = ibstatus(board);
- if ((status & CIC) == 0) {
- pr_err("gpib: cannot send command when not controller-in-charge\n");
- return -EIO;
- }
+ if ((status & CIC) == 0)
+ return -EINVAL;
os_start_timer(board, board->usec_timeout);
@@ -140,26 +131,22 @@ int ibcmd(gpib_board_t *board, uint8_t *buf, size_t length, size_t *bytes_writte
* active state, i.e., turn ATN off.
*/
-int ibgts(gpib_board_t *board)
+int ibgts(struct gpib_board *board)
{
int status = ibstatus(board);
int retval;
- if ((status & CIC) == 0) {
- pr_err("gpib: not CIC during %s()\n", __func__);
- return -1;
- }
+ if ((status & CIC) == 0)
+ return -EINVAL;
retval = board->interface->go_to_standby(board); /* go to standby */
- if (retval < 0)
- pr_err("gpib: error while going to standby\n");
board->interface->update_status(board, 0);
return retval;
}
-static int autospoll_wait_should_wake_up(gpib_board_t *board)
+static int autospoll_wait_should_wake_up(struct gpib_board *board)
{
int retval;
@@ -175,7 +162,7 @@ static int autospoll_wait_should_wake_up(gpib_board_t *board)
static int autospoll_thread(void *board_void)
{
- gpib_board_t *board = board_void;
+ struct gpib_board *board = board_void;
int retval = 0;
dev_dbg(board->gpib_dev, "entering autospoll thread\n");
@@ -200,20 +187,19 @@ static int autospoll_thread(void *board_void)
retval = autopoll_all_devices(board);
module_put(board->provider_module);
} else {
- pr_err("gpib%i: %s: try_module_get() failed!\n", board->minor, __func__);
+ dev_err(board->gpib_dev, "try_module_get() failed!\n");
}
if (retval <= 0) {
- pr_err("gpib%i: %s: stuck SRQ\n", board->minor, __func__);
+ dev_err(board->gpib_dev, "stuck SRQ\n");
atomic_set(&board->stuck_srq, 1); // XXX could be better
set_bit(SRQI_NUM, &board->status);
}
}
- pr_info("gpib%i: exiting autospoll thread\n", board->minor);
return retval;
}
-int ibonline(gpib_board_t *board)
+int ibonline(struct gpib_board *board)
{
int retval;
@@ -230,7 +216,6 @@ int ibonline(gpib_board_t *board)
retval = board->interface->attach(board, &board->config);
if (retval < 0) {
board->interface->detach(board);
- pr_err("gpib: interface attach failed\n");
return retval;
}
/* nios2nommu on 2.6.11 uclinux kernel has weird problems
@@ -241,19 +226,19 @@ int ibonline(gpib_board_t *board)
"gpib%d_autospoll_kthread", board->minor);
retval = IS_ERR(board->autospoll_task);
if (retval) {
- pr_err("gpib: failed to create autospoll thread\n");
+ dev_err(board->gpib_dev, "failed to create autospoll thread\n");
board->interface->detach(board);
return retval;
}
#endif
board->online = 1;
- dev_dbg(board->gpib_dev, "gpib: board online\n");
+ dev_dbg(board->gpib_dev, "board online\n");
return 0;
}
/* XXX need to make sure board is generally not in use (grab board lock?) */
-int iboffline(gpib_board_t *board)
+int iboffline(struct gpib_board *board)
{
int retval;
@@ -265,14 +250,14 @@ int iboffline(gpib_board_t *board)
if (board->autospoll_task && !IS_ERR(board->autospoll_task)) {
retval = kthread_stop(board->autospoll_task);
if (retval)
- pr_err("gpib: kthread_stop returned %i\n", retval);
+ dev_err(board->gpib_dev, "kthread_stop returned %i\n", retval);
board->autospoll_task = NULL;
}
board->interface->detach(board);
gpib_deallocate_board(board);
board->online = 0;
- dev_dbg(board->gpib_dev, "gpib: board offline\n");
+ dev_dbg(board->gpib_dev, "board offline\n");
return 0;
}
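
ibonline() and iboffline() bracket all use of a board: attach the hardware driver (and, where enabled, start the autospoll kthread), then stop the thread, detach and free the buffers. A hedged sketch of a caller, purely for orientation; the locking and descriptor handling done by the real ioctl paths is omitted:

static int example_board_session(struct gpib_board *board)
{
	int retval = ibonline(board);	/* attach + optional autospoll thread */

	if (retval)
		return retval;

	/* ... run bus traffic via ibcmd()/ibrd()/ibwrt() ... */

	return iboffline(board);	/* stop thread, detach, free board resources */
}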
@@ -285,7 +270,7 @@ int iboffline(gpib_board_t *board)
* Next LSB (bits 8-15) - STATUS lines mask (lines that are currently set).
*
*/
-int iblines(const gpib_board_t *board, short *lines)
+int iblines(const struct gpib_board *board, short *lines)
{
int retval;
@@ -312,7 +297,7 @@ int iblines(const gpib_board_t *board, short *lines)
* calling ibcmd.
*/
-int ibrd(gpib_board_t *board, uint8_t *buf, size_t length, int *end_flag, size_t *nbytes)
+int ibrd(struct gpib_board *board, uint8_t *buf, size_t length, int *end_flag, size_t *nbytes)
{
ssize_t ret = 0;
int retval;
@@ -320,10 +305,8 @@ int ibrd(gpib_board_t *board, uint8_t *buf, size_t length, int *end_flag, size_t
*nbytes = 0;
*end_flag = 0;
- if (length == 0) {
- pr_warn("gpib: %s() called with zero length?\n", __func__);
+ if (length == 0)
return 0;
- }
if (board->master) {
retval = ibgts(board);
@@ -338,10 +321,9 @@ int ibrd(gpib_board_t *board, uint8_t *buf, size_t length, int *end_flag, size_t
do {
ret = board->interface->read(board, buf, length - *nbytes, end_flag, &bytes_read);
- if (ret < 0) {
- pr_err("gpib read error\n");
+ if (ret < 0)
goto ibrd_out;
- }
+
buf += bytes_read;
*nbytes += bytes_read;
if (need_resched())
@@ -361,7 +343,7 @@ ibrd_out:
* 1. Prior to conducting the poll the interface is placed
* in the controller active state.
*/
-int ibrpp(gpib_board_t *board, uint8_t *result)
+int ibrpp(struct gpib_board *board, uint8_t *result)
{
int retval = 0;
@@ -370,15 +352,13 @@ int ibrpp(gpib_board_t *board, uint8_t *result)
if (retval)
return -1;
- if (board->interface->parallel_poll(board, result)) {
- pr_err("gpib: parallel poll failed\n");
- retval = -1;
- }
+ retval = board->interface->parallel_poll(board, result);
+
os_remove_timer(board);
return retval;
}
-int ibppc(gpib_board_t *board, uint8_t configuration)
+int ibppc(struct gpib_board *board, uint8_t configuration)
{
configuration &= 0x1f;
board->interface->parallel_poll_configure(board, configuration);
@@ -387,15 +367,13 @@ int ibppc(gpib_board_t *board, uint8_t configuration)
return 0;
}
-int ibrsv2(gpib_board_t *board, uint8_t status_byte, int new_reason_for_service)
+int ibrsv2(struct gpib_board *board, uint8_t status_byte, int new_reason_for_service)
{
int board_status = ibstatus(board);
const unsigned int MSS = status_byte & request_service_bit;
- if ((board_status & CIC)) {
- pr_err("gpib: interface requested service while CIC\n");
+ if ((board_status & CIC))
return -EINVAL;
- }
if (MSS == 0 && new_reason_for_service)
return -EINVAL;
@@ -422,21 +400,17 @@ int ibrsv2(gpib_board_t *board, uint8_t status_byte, int new_reason_for_service)
* ibcmd in order to initialize the bus and enable the
* interface to leave the controller idle state.
*/
-int ibsic(gpib_board_t *board, unsigned int usec_duration)
+int ibsic(struct gpib_board *board, unsigned int usec_duration)
{
- if (board->master == 0) {
- pr_err("gpib: tried to assert IFC when not system controller\n");
- return -1;
- }
+ if (board->master == 0)
+ return -EINVAL;
if (usec_duration < 100)
usec_duration = 100;
- if (usec_duration > 1000) {
+ if (usec_duration > 1000)
usec_duration = 1000;
- pr_warn("gpib: warning, shortening long udelay\n");
- }
- dev_dbg(board->gpib_dev, "sending interface clear\n");
+ dev_dbg(board->gpib_dev, "sending interface clear, delay = %ius\n", usec_duration);
board->interface->interface_clear(board, 1);
udelay(usec_duration);
board->interface->interface_clear(board, 0);
@@ -444,26 +418,22 @@ int ibsic(gpib_board_t *board, unsigned int usec_duration)
return 0;
}
-void ibrsc(gpib_board_t *board, int request_control)
+ /* FIXME make int */
+void ibrsc(struct gpib_board *board, int request_control)
{
board->master = request_control != 0;
- if (!board->interface->request_system_control) {
- pr_err("gpib: bug! driver does not implement request_system_control()\n");
- return;
- }
- board->interface->request_system_control(board, request_control);
+ if (board->interface->request_system_control)
+ board->interface->request_system_control(board, request_control);
}
/*
* IBSRE
* Send REN true if v is non-zero or false if v is zero.
*/
-int ibsre(gpib_board_t *board, int enable)
+int ibsre(struct gpib_board *board, int enable)
{
- if (board->master == 0) {
- pr_err("gpib: tried to set REN when not system controller\n");
- return -1;
- }
+ if (board->master == 0)
+ return -EINVAL;
board->interface->remote_enable(board, enable); /* set or clear REN */
if (!enable)
@@ -477,12 +447,11 @@ int ibsre(gpib_board_t *board, int enable)
* change the GPIB address of the interface board. The address
* must be 0 through 30. ibonl resets the address to PAD.
*/
-int ibpad(gpib_board_t *board, unsigned int addr)
+int ibpad(struct gpib_board *board, unsigned int addr)
{
- if (addr > MAX_GPIB_PRIMARY_ADDRESS) {
- pr_err("gpib: invalid primary address %u\n", addr);
- return -1;
- }
+ if (addr > MAX_GPIB_PRIMARY_ADDRESS)
+ return -EINVAL;
+
board->pad = addr;
if (board->online)
board->interface->primary_address(board, board->pad);
@@ -496,12 +465,10 @@ int ibpad(gpib_board_t *board, unsigned int addr)
* The address must be 0 through 30, or negative disables. ibonl resets the
* address to SAD.
*/
-int ibsad(gpib_board_t *board, int addr)
+int ibsad(struct gpib_board *board, int addr)
{
- if (addr > MAX_GPIB_SECONDARY_ADDRESS) {
- pr_err("gpib: invalid secondary address %i\n", addr);
- return -1;
- }
+ if (addr > MAX_GPIB_SECONDARY_ADDRESS)
+ return -EINVAL;
board->sad = addr;
if (board->online) {
if (board->sad >= 0)
@@ -519,14 +486,12 @@ int ibsad(gpib_board_t *board, int addr)
* Set the end-of-string modes for I/O operations to v.
*
*/
-int ibeos(gpib_board_t *board, int eos, int eosflags)
+int ibeos(struct gpib_board *board, int eos, int eosflags)
{
int retval;
- if (eosflags & ~EOS_MASK) {
- pr_err("bad EOS modes\n");
+ if (eosflags & ~EOS_MASK)
return -EINVAL;
- }
if (eosflags & REOS) {
retval = board->interface->enable_eos(board, eos, eosflags & BIN);
} else {
@@ -536,12 +501,12 @@ int ibeos(gpib_board_t *board, int eos, int eosflags)
return retval;
}
-int ibstatus(gpib_board_t *board)
+int ibstatus(struct gpib_board *board)
{
return general_ibstatus(board, NULL, 0, 0, NULL);
}
-int general_ibstatus(gpib_board_t *board, const gpib_status_queue_t *device,
+int general_ibstatus(struct gpib_board *board, const gpib_status_queue_t *device,
int clear_mask, int set_mask, gpib_descriptor_t *desc)
{
int status = 0;
@@ -555,8 +520,8 @@ int general_ibstatus(gpib_board_t *board, const gpib_status_queue_t *device,
status &= ~TIMO;
/* get real SRQI status if we can */
if (iblines(board, &line_status) == 0) {
- if ((line_status & ValidSRQ)) {
- if ((line_status & BusSRQ))
+ if ((line_status & VALID_SRQ)) {
+ if ((line_status & BUS_SRQ))
status |= SRQI;
else
status &= ~SRQI;
@@ -587,7 +552,7 @@ int general_ibstatus(gpib_board_t *board, const gpib_status_queue_t *device,
}
struct wait_info {
- gpib_board_t *board;
+ struct gpib_board *board;
struct timer_list timer;
int timed_out;
unsigned long usec_timeout;
@@ -611,7 +576,7 @@ static void init_wait_info(struct wait_info *winfo)
static int wait_satisfied(struct wait_info *winfo, gpib_status_queue_t *status_queue,
int wait_mask, int *status, gpib_descriptor_t *desc)
{
- gpib_board_t *board = winfo->board;
+ struct gpib_board *board = winfo->board;
int temp_status;
if (mutex_lock_interruptible(&board->big_gpib_mutex))
@@ -657,7 +622,7 @@ static void remove_wait_timer(struct wait_info *winfo)
* If the mask is 0 then
* no condition is waited for.
*/
-int ibwait(gpib_board_t *board, int wait_mask, int clear_mask, int set_mask,
+int ibwait(struct gpib_board *board, int wait_mask, int clear_mask, int set_mask,
int *status, unsigned long usec_timeout, gpib_descriptor_t *desc)
{
int retval = 0;
@@ -712,15 +677,13 @@ int ibwait(gpib_board_t *board, int wait_mask, int clear_mask, int set_mask,
* well as the interface board itself must be
* addressed by calling ibcmd.
*/
-int ibwrt(gpib_board_t *board, uint8_t *buf, size_t cnt, int send_eoi, size_t *bytes_written)
+int ibwrt(struct gpib_board *board, uint8_t *buf, size_t cnt, int send_eoi, size_t *bytes_written)
{
int ret = 0;
int retval;
- if (cnt == 0) {
- pr_warn("gpib: %s() called with zero length?\n", __func__);
+ if (cnt == 0)
return 0;
- }
if (board->master) {
retval = ibgts(board);
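
A recurring theme in the iblib.c changes above is replacing "print an error and return -1" with plain errno returns (-EINVAL, -ENOENT, ...), so failures can be passed to user space unchanged. A small illustrative caller, assuming ibpad()/ibsad() still return 0 on success:

static int example_set_addresses(struct gpib_board *board, unsigned int pad, int sad)
{
	int retval = ibpad(board, pad);	/* -EINVAL if pad > MAX_GPIB_PRIMARY_ADDRESS */

	if (retval)
		return retval;

	return ibsad(board, sad);	/* -EINVAL if sad > MAX_GPIB_SECONDARY_ADDRESS */
}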
diff --git a/drivers/staging/gpib/common/ibsys.h b/drivers/staging/gpib/common/ibsys.h
index da20971e9c7e..19960af809c2 100644
--- a/drivers/staging/gpib/common/ibsys.h
+++ b/drivers/staging/gpib/common/ibsys.h
@@ -19,13 +19,13 @@
#define MAX_GPIB_PRIMARY_ADDRESS 30
#define MAX_GPIB_SECONDARY_ADDRESS 31
-int gpib_allocate_board(gpib_board_t *board);
-void gpib_deallocate_board(gpib_board_t *board);
+int gpib_allocate_board(struct gpib_board *board);
+void gpib_deallocate_board(struct gpib_board *board);
unsigned int num_status_bytes(const gpib_status_queue_t *dev);
-int push_status_byte(gpib_board_t *board, gpib_status_queue_t *device, uint8_t poll_byte);
-int pop_status_byte(gpib_board_t *board, gpib_status_queue_t *device, uint8_t *poll_byte);
-gpib_status_queue_t *get_gpib_status_queue(gpib_board_t *board, unsigned int pad, int sad);
-int get_serial_poll_byte(gpib_board_t *board, unsigned int pad, int sad,
+int push_status_byte(struct gpib_board *board, gpib_status_queue_t *device, uint8_t poll_byte);
+int pop_status_byte(struct gpib_board *board, gpib_status_queue_t *device, uint8_t *poll_byte);
+gpib_status_queue_t *get_gpib_status_queue(struct gpib_board *board, unsigned int pad, int sad);
+int get_serial_poll_byte(struct gpib_board *board, unsigned int pad, int sad,
unsigned int usec_timeout, uint8_t *poll_byte);
-int autopoll_all_devices(gpib_board_t *board);
+int autopoll_all_devices(struct gpib_board *board);
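
The ibsys.h hunk is representative of the mechanical conversion running through the whole series: the gpib_board_t typedef is dropped in favour of spelling out struct gpib_board, matching kernel coding style, which discourages typedefs for plain structs. Schematically (the old typedef is shown only for contrast):

/* before: callers go through a typedef */
typedef struct gpib_board gpib_board_t;
int gpib_allocate_board(gpib_board_t *board);

/* after: the struct tag is spelled out everywhere */
int gpib_allocate_board(struct gpib_board *board);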
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.c b/drivers/staging/gpib/eastwood/fluke_gpib.c
index 0304c5de4ccd..a6b1ac169f94 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.c
+++ b/drivers/staging/gpib/eastwood/fluke_gpib.c
@@ -7,6 +7,10 @@
* copyright: (C) 2006, 2010, 2015 Fluke Corporation
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "fluke_gpib.h"
#include "gpibP.h"
@@ -20,11 +24,11 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB Driver for Fluke cda devices");
-static int fluke_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config);
-static int fluke_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config);
-static void fluke_detach(gpib_board_t *board);
-static int fluke_config_dma(gpib_board_t *board, int output);
-static irqreturn_t fluke_gpib_internal_interrupt(gpib_board_t *board);
+static int fluke_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config);
+static int fluke_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config);
+static void fluke_detach(struct gpib_board *board);
+static int fluke_config_dma(struct gpib_board *board, int output);
+static irqreturn_t fluke_gpib_internal_interrupt(struct gpib_board *board);
static struct platform_device *fluke_gpib_pdev;
@@ -50,7 +54,7 @@ static void fluke_locking_write_byte(struct nec7210_priv *nec_priv, uint8_t byte
}
// wrappers for interface functions
-static int fluke_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
+static int fluke_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
size_t *bytes_read)
{
struct fluke_priv *priv = board->private_data;
@@ -58,7 +62,7 @@ static int fluke_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int fluke_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fluke_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fluke_priv *priv = board->private_data;
@@ -66,28 +70,29 @@ static int fluke_write(gpib_board_t *board, uint8_t *buffer, size_t length,
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int fluke_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int fluke_command(struct gpib_board *board, uint8_t *buffer,
+ size_t length, size_t *bytes_written)
{
struct fluke_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-static int fluke_take_control(gpib_board_t *board, int synchronous)
+static int fluke_take_control(struct gpib_board *board, int synchronous)
{
struct fluke_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-static int fluke_go_to_standby(gpib_board_t *board)
+static int fluke_go_to_standby(struct gpib_board *board)
{
struct fluke_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void fluke_request_system_control(gpib_board_t *board, int request_control)
+static void fluke_request_system_control(struct gpib_board *board, int request_control)
{
struct fluke_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -95,91 +100,91 @@ static void fluke_request_system_control(gpib_board_t *board, int request_contro
nec7210_request_system_control(board, nec_priv, request_control);
}
-static void fluke_interface_clear(gpib_board_t *board, int assert)
+static void fluke_interface_clear(struct gpib_board *board, int assert)
{
struct fluke_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-static void fluke_remote_enable(gpib_board_t *board, int enable)
+static void fluke_remote_enable(struct gpib_board *board, int enable)
{
struct fluke_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int fluke_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int fluke_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct fluke_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-static void fluke_disable_eos(gpib_board_t *board)
+static void fluke_disable_eos(struct gpib_board *board)
{
struct fluke_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-static unsigned int fluke_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int fluke_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct fluke_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-static int fluke_primary_address(gpib_board_t *board, unsigned int address)
+static int fluke_primary_address(struct gpib_board *board, unsigned int address)
{
struct fluke_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-static int fluke_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int fluke_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct fluke_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int fluke_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int fluke_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct fluke_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void fluke_parallel_poll_configure(gpib_board_t *board, uint8_t configuration)
+static void fluke_parallel_poll_configure(struct gpib_board *board, uint8_t configuration)
{
struct fluke_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, configuration);
}
-static void fluke_parallel_poll_response(gpib_board_t *board, int ist)
+static void fluke_parallel_poll_response(struct gpib_board *board, int ist)
{
struct fluke_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-static void fluke_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void fluke_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct fluke_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static uint8_t fluke_serial_poll_status(gpib_board_t *board)
+static uint8_t fluke_serial_poll_status(struct gpib_board *board)
{
struct fluke_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-static void fluke_return_to_local(gpib_board_t *board)
+static void fluke_return_to_local(struct gpib_board *board)
{
struct fluke_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -189,39 +194,37 @@ static void fluke_return_to_local(gpib_board_t *board)
write_byte(nec_priv, AUX_RTL, AUXMR);
}
-static int fluke_line_status(const gpib_board_t *board)
+static int fluke_line_status(const struct gpib_board *board)
{
- int status = ValidALL;
+ int status = VALID_ALL;
int bsr_bits;
struct fluke_priv *e_priv;
- struct nec7210_priv *nec_priv;
e_priv = board->private_data;
- nec_priv = &e_priv->nec7210_priv;
bsr_bits = fluke_paged_read_byte(e_priv, BUS_STATUS, BUS_STATUS_PAGE);
if ((bsr_bits & BSR_REN_BIT) == 0)
- status |= BusREN;
+ status |= BUS_REN;
if ((bsr_bits & BSR_IFC_BIT) == 0)
- status |= BusIFC;
+ status |= BUS_IFC;
if ((bsr_bits & BSR_SRQ_BIT) == 0)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if ((bsr_bits & BSR_EOI_BIT) == 0)
- status |= BusEOI;
+ status |= BUS_EOI;
if ((bsr_bits & BSR_NRFD_BIT) == 0)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if ((bsr_bits & BSR_NDAC_BIT) == 0)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if ((bsr_bits & BSR_DAV_BIT) == 0)
- status |= BusDAV;
+ status |= BUS_DAV;
if ((bsr_bits & BSR_ATN_BIT) == 0)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
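
The renamed VALID_*/BUS_* masks keep the iblines() convention: the low bits say which lines the hardware can report, the upper bits give their current state, as general_ibstatus() does for SRQ in iblib.c above. A hedged sketch of decoding one line; the helper is illustrative, and its -ENOENT return for "cannot report" simply mirrors what this series uses for unsupported driver features:

static int example_srq_asserted(struct gpib_board *board)
{
	short lines;
	int retval = iblines(board, &lines);

	if (retval < 0)
		return retval;
	if (!(lines & VALID_SRQ))	/* this hardware cannot observe SRQ */
		return -ENOENT;

	return (lines & BUS_SRQ) ? 1 : 0;
}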
-static unsigned int fluke_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int fluke_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -238,7 +241,7 @@ static unsigned int fluke_t1_delay(gpib_board_t *board, unsigned int nano_sec)
return retval;
}
-static int lacs_or_read_ready(gpib_board_t *board)
+static int lacs_or_read_ready(struct gpib_board *board)
{
const struct fluke_priv *e_priv = board->private_data;
const struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -254,7 +257,7 @@ static int lacs_or_read_ready(gpib_board_t *board)
/* Wait until it is possible for a read to do something useful. This
* is not essential, it only exists to prevent RFD holdoff from being released pointlessly.
*/
-static int wait_for_read(gpib_board_t *board)
+static int wait_for_read(struct gpib_board *board)
{
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -263,9 +266,9 @@ static int wait_for_read(gpib_board_t *board)
if (wait_event_interruptible(board->wait,
lacs_or_read_ready(board) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
@@ -311,34 +314,30 @@ static int source_handshake_is_sids_or_sgns(struct fluke_priv *e_priv)
* If the chip is SGNS it is probably waiting for a byte to
* be written to it.
*/
-static int wait_for_data_out_ready(gpib_board_t *board)
+static int wait_for_data_out_ready(struct gpib_board *board)
{
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
int retval = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (wait_event_interruptible(board->wait,
(test_bit(TACS_NUM, &board->status) &&
source_handshake_is_sgns(e_priv)) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
-// printk("%s: exit, retval=%i\n", __FUNCTION__, retval);
return retval;
}
-static int wait_for_sids_or_sgns(gpib_board_t *board)
+static int wait_for_sids_or_sgns(struct gpib_board *board)
{
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
int retval = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (wait_event_interruptible(board->wait,
source_handshake_is_sids_or_sgns(e_priv) ||
@@ -350,19 +349,17 @@ static int wait_for_sids_or_sgns(gpib_board_t *board)
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
-// printk("%s: exit, retval=%i\n", __FUNCTION__, retval);
return retval;
}
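
wait_for_read(), wait_for_data_out_ready() and wait_for_sids_or_sgns() all share one shape: block in wait_event_interruptible() on a composite condition, then map the outcome to an error with the precedence signal < timeout < device clear. A condensed sketch of that shape, with the driver-specific readiness test omitted:

static int example_wait(struct gpib_board *board, struct nec7210_priv *nec_priv)
{
	int retval = 0;

	if (wait_event_interruptible(board->wait,
				     test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
				     test_bit(TIMO_NUM, &board->status)))
		retval = -ERESTARTSYS;	/* interrupted by a signal */
	if (test_bit(TIMO_NUM, &board->status))
		retval = -ETIMEDOUT;	/* board timeout expired */
	if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
		retval = -EINTR;	/* device clear received */

	return retval;
}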
static void fluke_dma_callback(void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
unsigned long flags;
spin_lock_irqsave(&board->spinlock, flags);
-// printk("%s: enter\n", __FUNCTION__);
nec7210_set_reg_bits(nec_priv, IMR1, HR_DOIE | HR_DIIE, HR_DOIE | HR_DIIE);
wake_up_interruptible(&board->wait);
@@ -370,11 +367,11 @@ static void fluke_dma_callback(void *arg)
fluke_gpib_internal_interrupt(board);
clear_bit(DMA_WRITE_IN_PROGRESS_BN, &nec_priv->state);
clear_bit(DMA_READ_IN_PROGRESS_BN, &nec_priv->state);
-// printk("%s: exit\n", __FUNCTION__);
+
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fluke_dma_write(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written)
{
struct fluke_priv *e_priv = board->private_data;
@@ -385,7 +382,7 @@ static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
struct dma_async_tx_descriptor *tx_desc;
*bytes_written = 0;
-// printk("%s: enter\n", __FUNCTION__);
+
if (WARN_ON_ONCE(length > e_priv->dma_buffer_size))
return -EFAULT;
dmaengine_terminate_all(e_priv->dma_channel);
@@ -403,7 +400,7 @@ static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
tx_desc = dmaengine_prep_slave_single(e_priv->dma_channel, address, length, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_desc) {
- pr_err("fluke_gpib: failed to allocate dma transmit descriptor\n");
+ dev_err(board->gpib_dev, "failed to allocate dma transmit descriptor\n");
retval = -ENOMEM;
goto cleanup;
}
@@ -419,10 +416,8 @@ static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
clear_bit(WRITE_READY_BN, &nec_priv->state);
set_bit(DMA_WRITE_IN_PROGRESS_BN, &nec_priv->state);
- // printk("%s: in spin lock\n", __FUNCTION__);
spin_unlock_irqrestore(&board->spinlock, flags);
-// printk("%s: waiting for write.\n", __FUNCTION__);
// suspend until message is sent
if (wait_event_interruptible(board->wait,
((readl(e_priv->write_transfer_counter) &
@@ -430,7 +425,6 @@ static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted!\n");
retval = -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
@@ -459,11 +453,10 @@ static int fluke_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
cleanup:
dma_unmap_single(board->dev, address, length, DMA_TO_DEVICE);
-// printk("%s: exit, retval=%d\n", __FUNCTION__, retval);
return retval;
}
-static int fluke_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fluke_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fluke_priv *e_priv = board->private_data;
@@ -474,7 +467,7 @@ static int fluke_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length
size_t dma_remainder = remainder;
if (!e_priv->dma_channel) {
- pr_err("fluke_gpib: No dma channel available, cannot do accel write.");
+ dev_err(board->gpib_dev, "No dma channel available, cannot do accel write.");
return -ENXIO;
}
@@ -486,7 +479,6 @@ static int fluke_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length
if (send_eoi)
--dma_remainder;
-// printk("%s: entering while loop\n", __FUNCTION__);
while (dma_remainder > 0) {
size_t num_bytes;
@@ -512,7 +504,7 @@ static int fluke_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length
//handle sending of last byte with eoi
if (send_eoi) {
size_t num_bytes;
- // printk("%s: handling last byte\n", __FUNCTION__);
+
if (WARN_ON_ONCE(remainder != 1))
return -EFAULT;
@@ -533,7 +525,6 @@ static int fluke_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length
return retval;
remainder -= num_bytes;
}
-// printk("%s: bytes send=%i\n", __FUNCTION__, (int)(length - remainder));
return 0;
}
@@ -544,7 +535,7 @@ static int fluke_get_dma_residue(struct dma_chan *chan, dma_cookie_t cookie)
result = dmaengine_pause(chan);
if (result < 0) {
- pr_err("fluke_gpib: dma pause failed?\n");
+ pr_err("dma pause failed?\n");
return result;
}
dmaengine_tx_status(chan, cookie, &state);
@@ -553,7 +544,7 @@ static int fluke_get_dma_residue(struct dma_chan *chan, dma_cookie_t cookie)
return state.residue;
}
-static int fluke_dma_read(gpib_board_t *board, uint8_t *buffer,
+static int fluke_dma_read(struct gpib_board *board, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
struct fluke_priv *e_priv = board->private_data;
@@ -567,10 +558,6 @@ static int fluke_dma_read(gpib_board_t *board, uint8_t *buffer,
int i;
static const int timeout = 10;
- // printk("%s: enter, bus_address=0x%x, length=%i\n", __FUNCTION__,
- // (unsigned)bus_address,
- // (int)length);
-
*bytes_read = 0;
*end = 0;
if (length == 0)
@@ -589,7 +576,7 @@ static int fluke_dma_read(gpib_board_t *board, uint8_t *buffer,
bus_address, length, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_desc) {
- pr_err("fluke_gpib: failed to allocate dma transmit descriptor\n");
+ dev_err(board->gpib_dev, "failed to allocate dma transmit descriptor\n");
dma_unmap_single(NULL, bus_address, length, DMA_FROM_DEVICE);
return -EIO;
}
@@ -608,14 +595,12 @@ static int fluke_dma_read(gpib_board_t *board, uint8_t *buffer,
clear_bit(READ_READY_BN, &nec_priv->state);
spin_unlock_irqrestore(&board->spinlock, flags);
-// printk("waiting for data transfer.\n");
// wait for data to transfer
if (wait_event_interruptible(board->wait,
test_bit(DMA_READ_IN_PROGRESS_BN, &nec_priv->state) == 0 ||
test_bit(RECEIVED_END_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_warn("fluke: dma read wait interrupted\n");
retval = -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
@@ -672,7 +657,7 @@ static int fluke_dma_read(gpib_board_t *board, uint8_t *buffer,
return retval;
}
-static int fluke_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fluke_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fluke_priv *e_priv = board->private_data;
@@ -682,10 +667,6 @@ static int fluke_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
int retval = 0;
size_t dma_nbytes;
-/* printk("%s: enter, buffer=0x%p, length=%i\n", __FUNCTION__,
- * buffer, (int)length);
- * printk("\t dma_buffer=0x%p\n", e_priv->dma_buffer);
- */
*end = 0;
*bytes_read = 0;
@@ -699,7 +680,6 @@ static int fluke_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
nec7210_release_rfd_holdoff(board, nec_priv);
-// printk("%s: entering while loop\n", __FUNCTION__);
while (remain > 0) {
transfer_size = (e_priv->dma_buffer_size < remain) ?
e_priv->dma_buffer_size : remain;
@@ -709,14 +689,12 @@ static int fluke_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
*bytes_read += dma_nbytes;
if (*end)
break;
- if (retval < 0) {
-// printk("%s: early exit, retval=%i\n", __FUNCTION__, (int)retval);
+ if (retval < 0)
return retval;
- }
if (need_resched())
schedule();
}
-// printk("%s: exit, retval=%i\n", __FUNCTION__, (int)retval);
+
return retval;
}
@@ -809,7 +787,7 @@ static gpib_interface_t fluke_interface = {
.return_to_local = fluke_return_to_local,
};
-irqreturn_t fluke_gpib_internal_interrupt(gpib_board_t *board)
+irqreturn_t fluke_gpib_internal_interrupt(struct gpib_board *board)
{
int status0, status1, status2;
struct fluke_priv *priv = board->private_data;
@@ -830,13 +808,6 @@ irqreturn_t fluke_gpib_internal_interrupt(gpib_board_t *board)
if (nec7210_interrupt_have_status(board, nec_priv, status1, status2) == IRQ_HANDLED)
retval = IRQ_HANDLED;
-/*
- * if((status1 & nec_priv->reg_bits[IMR1]) ||
- * (status2 & (nec_priv->reg_bits[IMR2] & IMR2_ENABLE_INTR_MASK)))
- * {
- * printk("fluke: status1 0x%x, status2 0x%x\n", status1, status2);
- * }
- */
if (read_byte(nec_priv, ADR0) & DATA_IN_STATUS) {
if (test_bit(RFD_HOLDOFF_BN, &nec_priv->state))
@@ -853,7 +824,7 @@ irqreturn_t fluke_gpib_internal_interrupt(gpib_board_t *board)
static irqreturn_t fluke_gpib_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
unsigned long flags;
irqreturn_t retval;
@@ -863,7 +834,7 @@ static irqreturn_t fluke_gpib_interrupt(int irq, void *arg)
return retval;
}
-static int fluke_allocate_private(gpib_board_t *board)
+static int fluke_allocate_private(struct gpib_board *board)
{
struct fluke_priv *priv;
@@ -880,7 +851,7 @@ static int fluke_allocate_private(gpib_board_t *board)
return 0;
}
-static void fluke_generic_detach(gpib_board_t *board)
+static void fluke_generic_detach(struct gpib_board *board)
{
if (board->private_data) {
struct fluke_priv *e_priv = board->private_data;
@@ -892,7 +863,7 @@ static void fluke_generic_detach(gpib_board_t *board)
}
// generic part of attach functions shared by all cb7210 boards
-static int fluke_generic_attach(gpib_board_t *board)
+static int fluke_generic_attach(struct gpib_board *board)
{
struct fluke_priv *e_priv;
struct nec7210_priv *nec_priv;
@@ -912,7 +883,7 @@ static int fluke_generic_attach(gpib_board_t *board)
return 0;
}
-static int fluke_config_dma(gpib_board_t *board, int output)
+static int fluke_config_dma(struct gpib_board *board, int output)
{
struct fluke_priv *e_priv = board->private_data;
struct dma_slave_config config;
@@ -937,7 +908,7 @@ static int fluke_config_dma(gpib_board_t *board, int output)
return dmaengine_slave_config(e_priv->dma_channel, &config);
}
-static int fluke_init(struct fluke_priv *e_priv, gpib_board_t *board, int handshake_mode)
+static int fluke_init(struct fluke_priv *e_priv, struct gpib_board *board, int handshake_mode)
{
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -954,7 +925,7 @@ static int fluke_init(struct fluke_priv *e_priv, gpib_board_t *board, int handsh
/* poll so we can detect ATN changes */
if (gpib_request_pseudo_irq(board, fluke_gpib_interrupt)) {
- pr_err("fluke_gpib: failed to allocate pseudo_irq\n");
+ dev_err(board->gpib_dev, "failed to allocate pseudo_irq\n");
return -EINVAL;
}
@@ -972,7 +943,7 @@ static bool gpib_dma_channel_filter(struct dma_chan *chan, void *filter_param)
return chan->chan_id == 0;
}
-static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *config,
+static int fluke_attach_impl(struct gpib_board *board, const gpib_board_config_t *config,
unsigned int handshake_mode)
{
struct fluke_priv *e_priv;
@@ -984,7 +955,7 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
dma_cap_mask_t dma_cap;
if (!fluke_gpib_pdev) {
- pr_err("No gpib platform device was found, attach failed.\n");
+ dev_err(board->gpib_dev, "No fluke device was found, attach failed.\n");
return -ENODEV;
}
@@ -999,7 +970,7 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
res = platform_get_resource(fluke_gpib_pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&fluke_gpib_pdev->dev, "Unable to locate mmio resource for cb7210 gpib\n");
+ dev_err(&fluke_gpib_pdev->dev, "Unable to locate mmio resource\n");
return -ENODEV;
}
@@ -1012,10 +983,7 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
e_priv->gpib_iomem_res = res;
nec_priv->mmiobase = ioremap(e_priv->gpib_iomem_res->start,
- resource_size(e_priv->gpib_iomem_res));
- pr_info("gpib: mmiobase %llx remapped to %p, length=%d\n",
- (u64)e_priv->gpib_iomem_res->start,
- nec_priv->mmiobase, (int)resource_size(e_priv->gpib_iomem_res));
+ resource_size(e_priv->gpib_iomem_res));
if (!nec_priv->mmiobase) {
dev_err(&fluke_gpib_pdev->dev, "Could not map I/O memory\n");
return -ENOMEM;
@@ -1050,19 +1018,14 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
e_priv->write_transfer_counter = ioremap(e_priv->write_transfer_counter_res->start,
resource_size(e_priv->write_transfer_counter_res));
- pr_info("gpib: write transfer counter %lx remapped to %p, length=%d\n",
- (unsigned long)e_priv->write_transfer_counter_res->start,
- e_priv->write_transfer_counter,
- (int)resource_size(e_priv->write_transfer_counter_res));
if (!e_priv->write_transfer_counter) {
dev_err(&fluke_gpib_pdev->dev, "Could not map I/O memory\n");
return -ENOMEM;
}
irq = platform_get_irq(fluke_gpib_pdev, 0);
- pr_info("gpib: irq %d\n", irq);
if (irq < 0) {
- dev_err(&fluke_gpib_pdev->dev, "fluke_gpib: request for IRQ failed\n");
+ dev_err(&fluke_gpib_pdev->dev, "failed to obtain IRQ\n");
return -EBUSY;
}
retval = request_irq(irq, fluke_gpib_interrupt, isr_flags, fluke_gpib_pdev->name, board);
@@ -1078,7 +1041,7 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
dma_cap_set(DMA_SLAVE, dma_cap);
e_priv->dma_channel = dma_request_channel(dma_cap, gpib_dma_channel_filter, NULL);
if (!e_priv->dma_channel) {
- pr_err("fluke_gpib: failed to allocate a dma channel.\n");
+ dev_err(board->gpib_dev, "failed to allocate a dma channel.\n");
// we don't error out here because unaccel interface will still
// work without dma
}
@@ -1086,17 +1049,17 @@ static int fluke_attach_impl(gpib_board_t *board, const gpib_board_config_t *con
return fluke_init(e_priv, board, handshake_mode);
}
-int fluke_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config)
+int fluke_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config)
{
return fluke_attach_impl(board, config, HR_HLDA);
}
-int fluke_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config)
+int fluke_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config)
{
return fluke_attach_impl(board, config, HR_HLDE);
}
-void fluke_detach(gpib_board_t *board)
+void fluke_detach(struct gpib_board *board)
{
struct fluke_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1142,8 +1105,7 @@ MODULE_DEVICE_TABLE(of, fluke_gpib_of_match);
static struct platform_driver fluke_gpib_platform_driver = {
.driver = {
- .name = "fluke_gpib",
- .owner = THIS_MODULE,
+ .name = DRV_NAME,
.of_match_table = fluke_gpib_of_match,
},
.probe = &fluke_gpib_probe
@@ -1155,25 +1117,25 @@ static int __init fluke_init_module(void)
result = platform_driver_register(&fluke_gpib_platform_driver);
if (result) {
- pr_err("fluke_gpib: platform_driver_register failed: error = %d\n", result);
+ pr_err("platform_driver_register failed: error = %d\n", result);
return result;
}
result = gpib_register_driver(&fluke_unaccel_interface, THIS_MODULE);
if (result) {
- pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_unaccel;
}
result = gpib_register_driver(&fluke_hybrid_interface, THIS_MODULE);
if (result) {
- pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_hybrid;
}
result = gpib_register_driver(&fluke_interface, THIS_MODULE);
if (result) {
- pr_err("fluke_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_interface;
}
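
The init path above registers the platform driver and then three gpib_interface_t variants, unwinding with gotos when any step fails. The same pattern reduced to two registrations, as a sketch only (the real code also registers the hybrid interface and logs each failure):

static int __init example_init_module(void)
{
	int result = platform_driver_register(&fluke_gpib_platform_driver);

	if (result)
		return result;

	result = gpib_register_driver(&fluke_unaccel_interface, THIS_MODULE);
	if (result)
		goto err_unaccel;

	result = gpib_register_driver(&fluke_interface, THIS_MODULE);
	if (result)
		goto err_interface;

	return 0;

err_interface:
	gpib_unregister_driver(&fluke_unaccel_interface);
err_unaccel:
	platform_driver_unregister(&fluke_gpib_platform_driver);
	return result;
}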
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
index f950e7cdd8f8..53f4b3fccc3c 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
@@ -12,6 +12,10 @@
* (C) 2017 Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "fmh_gpib.h"
#include "gpibP.h"
@@ -28,19 +32,21 @@ MODULE_DESCRIPTION("GPIB Driver for fmh_gpib_core");
MODULE_AUTHOR("Frank Mori Hess <fmh6jj@gmail.com>");
static irqreturn_t fmh_gpib_interrupt(int irq, void *arg);
-static int fmh_gpib_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config);
-static int fmh_gpib_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config);
-static void fmh_gpib_detach(gpib_board_t *board);
-static int fmh_gpib_pci_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config);
-static int fmh_gpib_pci_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config);
-static void fmh_gpib_pci_detach(gpib_board_t *board);
-static int fmh_gpib_config_dma(gpib_board_t *board, int output);
-static irqreturn_t fmh_gpib_internal_interrupt(gpib_board_t *board);
+static int fmh_gpib_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config);
+static int fmh_gpib_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config);
+static void fmh_gpib_detach(struct gpib_board *board);
+static int fmh_gpib_pci_attach_holdoff_all(struct gpib_board *board,
+ const gpib_board_config_t *config);
+static int fmh_gpib_pci_attach_holdoff_end(struct gpib_board *board,
+ const gpib_board_config_t *config);
+static void fmh_gpib_pci_detach(struct gpib_board *board);
+static int fmh_gpib_config_dma(struct gpib_board *board, int output);
+static irqreturn_t fmh_gpib_internal_interrupt(struct gpib_board *board);
static struct platform_driver fmh_gpib_platform_driver;
static struct pci_driver fmh_gpib_pci_driver;
// wrappers for interface functions
-static int fmh_gpib_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fmh_priv *priv = board->private_data;
@@ -48,7 +54,7 @@ static int fmh_gpib_read(gpib_board_t *board, uint8_t *buffer, size_t length,
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int fmh_gpib_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fmh_priv *priv = board->private_data;
@@ -56,7 +62,7 @@ static int fmh_gpib_write(gpib_board_t *board, uint8_t *buffer, size_t length,
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int fmh_gpib_command(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_command(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written)
{
struct fmh_priv *priv = board->private_data;
@@ -64,21 +70,21 @@ static int fmh_gpib_command(gpib_board_t *board, uint8_t *buffer, size_t length,
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-static int fmh_gpib_take_control(gpib_board_t *board, int synchronous)
+static int fmh_gpib_take_control(struct gpib_board *board, int synchronous)
{
struct fmh_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-static int fmh_gpib_go_to_standby(gpib_board_t *board)
+static int fmh_gpib_go_to_standby(struct gpib_board *board)
{
struct fmh_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void fmh_gpib_request_system_control(gpib_board_t *board, int request_control)
+static void fmh_gpib_request_system_control(struct gpib_board *board, int request_control)
{
struct fmh_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -86,77 +92,77 @@ static void fmh_gpib_request_system_control(gpib_board_t *board, int request_con
nec7210_request_system_control(board, nec_priv, request_control);
}
-static void fmh_gpib_interface_clear(gpib_board_t *board, int assert)
+static void fmh_gpib_interface_clear(struct gpib_board *board, int assert)
{
struct fmh_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-static void fmh_gpib_remote_enable(gpib_board_t *board, int enable)
+static void fmh_gpib_remote_enable(struct gpib_board *board, int enable)
{
struct fmh_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int fmh_gpib_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int fmh_gpib_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct fmh_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-static void fmh_gpib_disable_eos(gpib_board_t *board)
+static void fmh_gpib_disable_eos(struct gpib_board *board)
{
struct fmh_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-static unsigned int fmh_gpib_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int fmh_gpib_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct fmh_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-static int fmh_gpib_primary_address(gpib_board_t *board, unsigned int address)
+static int fmh_gpib_primary_address(struct gpib_board *board, unsigned int address)
{
struct fmh_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-static int fmh_gpib_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int fmh_gpib_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct fmh_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int fmh_gpib_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int fmh_gpib_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct fmh_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void fmh_gpib_parallel_poll_configure(gpib_board_t *board, uint8_t configuration)
+static void fmh_gpib_parallel_poll_configure(struct gpib_board *board, uint8_t configuration)
{
struct fmh_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, configuration);
}
-static void fmh_gpib_parallel_poll_response(gpib_board_t *board, int ist)
+static void fmh_gpib_parallel_poll_response(struct gpib_board *board, int ist)
{
struct fmh_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-static void fmh_gpib_local_parallel_poll_mode(gpib_board_t *board, int local)
+static void fmh_gpib_local_parallel_poll_mode(struct gpib_board *board, int local)
{
struct fmh_priv *priv = board->private_data;
@@ -171,7 +177,7 @@ static void fmh_gpib_local_parallel_poll_mode(gpib_board_t *board, int local)
}
}
-static void fmh_gpib_serial_poll_response2(gpib_board_t *board, uint8_t status,
+static void fmh_gpib_serial_poll_response2(struct gpib_board *board, uint8_t status,
int new_reason_for_service)
{
struct fmh_priv *priv = board->private_data;
@@ -206,14 +212,14 @@ static void fmh_gpib_serial_poll_response2(gpib_board_t *board, uint8_t status,
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static uint8_t fmh_gpib_serial_poll_status(gpib_board_t *board)
+static uint8_t fmh_gpib_serial_poll_status(struct gpib_board *board)
{
struct fmh_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-static void fmh_gpib_return_to_local(gpib_board_t *board)
+static void fmh_gpib_return_to_local(struct gpib_board *board)
{
struct fmh_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -223,9 +229,9 @@ static void fmh_gpib_return_to_local(gpib_board_t *board)
write_byte(nec_priv, AUX_RTL, AUXMR);
}
-static int fmh_gpib_line_status(const gpib_board_t *board)
+static int fmh_gpib_line_status(const struct gpib_board *board)
{
- int status = ValidALL;
+ int status = VALID_ALL;
int bsr_bits;
struct fmh_priv *e_priv;
struct nec7210_priv *nec_priv;
@@ -236,26 +242,26 @@ static int fmh_gpib_line_status(const gpib_board_t *board)
bsr_bits = read_byte(nec_priv, BUS_STATUS_REG);
if ((bsr_bits & BSR_REN_BIT) == 0)
- status |= BusREN;
+ status |= BUS_REN;
if ((bsr_bits & BSR_IFC_BIT) == 0)
- status |= BusIFC;
+ status |= BUS_IFC;
if ((bsr_bits & BSR_SRQ_BIT) == 0)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if ((bsr_bits & BSR_EOI_BIT) == 0)
- status |= BusEOI;
+ status |= BUS_EOI;
if ((bsr_bits & BSR_NRFD_BIT) == 0)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if ((bsr_bits & BSR_NDAC_BIT) == 0)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if ((bsr_bits & BSR_DAV_BIT) == 0)
- status |= BusDAV;
+ status |= BUS_DAV;
if ((bsr_bits & BSR_ATN_BIT) == 0)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
-static unsigned int fmh_gpib_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int fmh_gpib_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -272,7 +278,7 @@ static unsigned int fmh_gpib_t1_delay(gpib_board_t *board, unsigned int nano_sec
return retval;
}
-static int lacs_or_read_ready(gpib_board_t *board)
+static int lacs_or_read_ready(struct gpib_board *board)
{
const struct fmh_priv *e_priv = board->private_data;
const struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -287,7 +293,7 @@ static int lacs_or_read_ready(gpib_board_t *board)
return retval;
}
-static int wait_for_read(gpib_board_t *board)
+static int wait_for_read(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -306,7 +312,7 @@ static int wait_for_read(gpib_board_t *board)
return retval;
}
-static int wait_for_rx_fifo_half_full_or_end(gpib_board_t *board)
+static int wait_for_rx_fifo_half_full_or_end(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
@@ -329,12 +335,11 @@ static int wait_for_rx_fifo_half_full_or_end(gpib_board_t *board)
/* Wait until the gpib chip is ready to accept a data out byte.
*/
-static int wait_for_data_out_ready(gpib_board_t *board)
+static int wait_for_data_out_ready(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
int retval = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (wait_event_interruptible(board->wait,
(test_bit(TACS_NUM, &board->status) &&
@@ -348,19 +353,18 @@ static int wait_for_data_out_ready(gpib_board_t *board)
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
-// printk("%s: exit, retval=%i\n", __FUNCTION__, retval);
+
return retval;
}
static void fmh_gpib_dma_callback(void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
unsigned long flags;
spin_lock_irqsave(&board->spinlock, flags);
-// printk("%s: enter\n", __FUNCTION__);
nec7210_set_reg_bits(nec_priv, IMR1, HR_DOIE | HR_DIIE, HR_DOIE | HR_DIIE);
wake_up_interruptible(&board->wait);
@@ -370,7 +374,6 @@ static void fmh_gpib_dma_callback(void *arg)
clear_bit(DMA_WRITE_IN_PROGRESS_BN, &nec_priv->state);
clear_bit(DMA_READ_IN_PROGRESS_BN, &nec_priv->state);
- // printk("%s: exit\n", __FUNCTION__);
spin_unlock_irqrestore(&board->spinlock, flags);
}
@@ -388,7 +391,7 @@ static int fmh_gpib_all_bytes_are_sent(struct fmh_priv *e_priv)
return 1;
}
-static int fmh_gpib_dma_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_dma_write(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -399,14 +402,13 @@ static int fmh_gpib_dma_write(gpib_board_t *board, uint8_t *buffer, size_t lengt
struct dma_async_tx_descriptor *tx_desc;
*bytes_written = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (WARN_ON_ONCE(length > e_priv->dma_buffer_size))
return -EFAULT;
dmaengine_terminate_all(e_priv->dma_channel);
memcpy(e_priv->dma_buffer, buffer, length);
address = dma_map_single(board->dev, e_priv->dma_buffer, length, DMA_TO_DEVICE);
if (dma_mapping_error(board->dev, address))
- pr_err("dma mapping error in dma write!\n");
+ dev_err(board->gpib_dev, "dma mapping error in dma write!\n");
/* program dma controller */
retval = fmh_gpib_config_dma(board, 1);
if (retval)
@@ -415,7 +417,7 @@ static int fmh_gpib_dma_write(gpib_board_t *board, uint8_t *buffer, size_t lengt
tx_desc = dmaengine_prep_slave_single(e_priv->dma_channel, address, length, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_desc) {
- pr_err("fmh_gpib_gpib: failed to allocate dma transmit descriptor\n");
+ dev_err(board->gpib_dev, "failed to allocate dma transmit descriptor\n");
retval = -ENOMEM;
goto cleanup;
}
@@ -432,19 +434,17 @@ static int fmh_gpib_dma_write(gpib_board_t *board, uint8_t *buffer, size_t lengt
dma_async_issue_pending(e_priv->dma_channel);
clear_bit(WRITE_READY_BN, &nec_priv->state);
set_bit(DMA_WRITE_IN_PROGRESS_BN, &nec_priv->state);
-// printk("%s: in spin lock\n", __FUNCTION__);
+
spin_unlock_irqrestore(&board->spinlock, flags);
-// printk("%s: waiting for write.\n", __FUNCTION__);
// suspend until message is sent
if (wait_event_interruptible(board->wait,
fmh_gpib_all_bytes_are_sent(e_priv) ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted!\n");
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
@@ -464,16 +464,12 @@ static int fmh_gpib_dma_write(gpib_board_t *board, uint8_t *buffer, size_t lengt
fifo_xfer_counter_mask);
if (WARN_ON_ONCE(*bytes_written > length))
return -EFAULT;
- /* printk("length=%i, *bytes_written=%i, residue=%i, retval=%i\n",
- * length, *bytes_written, get_dma_residue(e_priv->dma_channel), retval);
- */
cleanup:
dma_unmap_single(board->dev, address, length, DMA_TO_DEVICE);
-// printk("%s: exit, retval=%d\n", __FUNCTION__, retval);
return retval;
}
-static int fmh_gpib_accel_write(gpib_board_t *board, uint8_t *buffer,
+static int fmh_gpib_accel_write(struct gpib_board *board, uint8_t *buffer,
size_t length, int send_eoi, size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -484,7 +480,7 @@ static int fmh_gpib_accel_write(gpib_board_t *board, uint8_t *buffer,
size_t dma_remainder = remainder;
if (!e_priv->dma_channel) {
- pr_err("fmh_gpib_gpib: No dma channel available, cannot do accel write.");
+ dev_err(board->gpib_dev, "No dma channel available, cannot do accel write.");
return -ENXIO;
}
@@ -498,7 +494,6 @@ static int fmh_gpib_accel_write(gpib_board_t *board, uint8_t *buffer,
if (send_eoi)
--dma_remainder;
-// printk("%s: entering while loop\n", __FUNCTION__);
while (dma_remainder > 0) {
size_t num_bytes;
@@ -524,7 +519,7 @@ static int fmh_gpib_accel_write(gpib_board_t *board, uint8_t *buffer,
// handle sending of last byte with eoi
if (send_eoi) {
size_t num_bytes;
- // printk("%s: handling last byte\n", __FUNCTION__);
+
if (WARN_ON_ONCE(remainder != 1))
return -EFAULT;
@@ -545,7 +540,6 @@ static int fmh_gpib_accel_write(gpib_board_t *board, uint8_t *buffer,
return retval;
remainder -= num_bytes;
}
-// printk("%s: bytes send=%i\n", __FUNCTION__, (int)(length - remainder));
return 0;
}
@@ -556,7 +550,7 @@ static int fmh_gpib_get_dma_residue(struct dma_chan *chan, dma_cookie_t cookie)
result = dmaengine_pause(chan);
if (result < 0) {
- pr_err("fmh_gpib_gpib: dma pause failed?\n");
+ pr_err("dma pause failed?\n");
return result;
}
dmaengine_tx_status(chan, cookie, &state);
@@ -565,12 +559,11 @@ static int fmh_gpib_get_dma_residue(struct dma_chan *chan, dma_cookie_t cookie)
return state.residue;
}
-static int wait_for_tx_fifo_half_empty(gpib_board_t *board)
+static int wait_for_tx_fifo_half_empty(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
int retval = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (wait_event_interruptible(board->wait,
(test_bit(TACS_NUM, &board->status) &&
@@ -584,14 +577,14 @@ static int wait_for_tx_fifo_half_empty(gpib_board_t *board)
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
-// printk("%s: exit, retval=%i\n", __FUNCTION__, retval);
+
return retval;
}
/* supports writing a chunk of data whose length must fit into the hardware's xfer counter,
* called in a loop by fmh_gpib_fifo_write()
*/
-static int fmh_gpib_fifo_write_countable(gpib_board_t *board, uint8_t *buffer,
+static int fmh_gpib_fifo_write_countable(struct gpib_board *board, uint8_t *buffer,
size_t length, int send_eoi, size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -600,7 +593,6 @@ static int fmh_gpib_fifo_write_countable(gpib_board_t *board, uint8_t *buffer,
unsigned int remainder;
*bytes_written = 0;
-// printk("%s: enter\n", __FUNCTION__);
if (WARN_ON_ONCE(length > fifo_xfer_counter_mask))
return -EFAULT;
@@ -635,10 +627,9 @@ static int fmh_gpib_fifo_write_countable(gpib_board_t *board, uint8_t *buffer,
fmh_gpib_all_bytes_are_sent(e_priv) ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted!\n");
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &nec_priv->state))
@@ -655,15 +646,11 @@ cleanup:
fifo_xfer_counter_mask);
if (WARN_ON_ONCE(*bytes_written > length))
return -EFAULT;
- /* printk("length=%i, *bytes_written=%i, residue=%i, retval=%i\n",
- * length, *bytes_written, get_dma_residue(e_priv->dma_channel), retval);
- */
-// printk("%s: exit, retval=%d\n", __FUNCTION__, retval);
return retval;
}
-static int fmh_gpib_fifo_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_fifo_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
struct fmh_priv *e_priv = board->private_data;
@@ -678,8 +665,6 @@ static int fmh_gpib_fifo_write(gpib_board_t *board, uint8_t *buffer, size_t leng
clear_bit(DEV_CLEAR_BN, &nec_priv->state); // XXX FIXME
-// printk("%s: entering while loop\n", __FUNCTION__);
-
while (remainder > 0) {
size_t num_bytes;
int last_pass;
@@ -708,11 +693,11 @@ static int fmh_gpib_fifo_write(gpib_board_t *board, uint8_t *buffer, size_t leng
if (need_resched())
schedule();
}
-// printk("%s: bytes send=%i\n", __FUNCTION__, (int)(length - remainder));
+
return retval;
}
-static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
+static int fmh_gpib_dma_read(struct gpib_board *board, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
@@ -725,10 +710,6 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
struct dma_async_tx_descriptor *tx_desc;
dma_cookie_t dma_cookie;
- // printk("%s: enter, bus_address=0x%x, length=%i\n", __FUNCTION__,
- //(unsigned)bus_address,
-// (int)length);
-
*bytes_read = 0;
*end = 0;
if (length == 0)
@@ -737,7 +718,7 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
bus_address = dma_map_single(board->dev, e_priv->dma_buffer,
length, DMA_FROM_DEVICE);
if (dma_mapping_error(board->dev, bus_address))
- pr_err("dma mapping error in dma read!");
+ dev_err(board->gpib_dev, "dma mapping error in dma read!");
/* program dma controller */
retval = fmh_gpib_config_dma(board, 0);
@@ -749,7 +730,7 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
length, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_desc) {
- pr_err("fmh_gpib_gpib: failed to allocate dma transmit descriptor\n");
+ dev_err(board->gpib_dev, "failed to allocate dma transmit descriptor\n");
dma_unmap_single(board->dev, bus_address, length, DMA_FROM_DEVICE);
return -EIO;
}
@@ -769,7 +750,7 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
set_bit(DMA_READ_IN_PROGRESS_BN, &nec_priv->state);
spin_unlock_irqrestore(&board->spinlock, flags);
-// printk("waiting for data transfer.\n");
+
// wait for data to transfer
wait_retval = wait_event_interruptible(board->wait,
test_bit(DMA_READ_IN_PROGRESS_BN, &nec_priv->state)
@@ -777,10 +758,9 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
test_bit(RECEIVED_END_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status));
- if (wait_retval) {
- pr_warn("fmh_gpib: dma read wait interrupted\n");
+ if (wait_retval)
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
@@ -825,13 +805,11 @@ static int fmh_gpib_dma_read(gpib_board_t *board, uint8_t *buffer,
*end = 1;
}
spin_unlock_irqrestore(&board->spinlock, flags);
-// printk("\tbytes_read=%i, residue=%i, end=%i, retval=%i, wait_retval=%i\n",
-// *bytes_read, residue, *end, retval, wait_retval);
return retval;
}
-static void fmh_gpib_release_rfd_holdoff(gpib_board_t *board, struct fmh_priv *e_priv)
+static void fmh_gpib_release_rfd_holdoff(struct gpib_board *board, struct fmh_priv *e_priv)
{
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
unsigned int ext_status_1;
@@ -868,7 +846,7 @@ static void fmh_gpib_release_rfd_holdoff(gpib_board_t *board, struct fmh_priv *e
spin_unlock_irqrestore(&board->spinlock, flags);
}
-static int fmh_gpib_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
@@ -918,17 +896,13 @@ static int fmh_gpib_accel_read(gpib_board_t *board, uint8_t *buffer, size_t leng
/* Read a chunk of data whose length is within the limits of the hardware's
* xfer counter. Called in a loop from fmh_gpib_fifo_read().
*/
-static int fmh_gpib_fifo_read_countable(gpib_board_t *board, uint8_t *buffer,
+static int fmh_gpib_fifo_read_countable(struct gpib_board *board, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
int retval = 0;
- // printk("%s: enter, bus_address=0x%x, length=%i\n", __FUNCTION__,
- // (unsigned)bus_address,
-// (int)length);
-
*bytes_read = 0;
*end = 0;
if (length == 0)
@@ -977,13 +951,10 @@ cleanup:
*end = 1;
}
-// printk("\tbytes_read=%i, end=%i, retval=%i, wait_retval=%i\n",
-// *bytes_read, *end, retval, wait_retval);
-
return retval;
}
-static int fmh_gpib_fifo_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int fmh_gpib_fifo_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct fmh_priv *e_priv = board->private_data;
@@ -1152,7 +1123,7 @@ static gpib_interface_t fmh_gpib_pci_unaccel_interface = {
.return_to_local = fmh_gpib_return_to_local,
};
-irqreturn_t fmh_gpib_internal_interrupt(gpib_board_t *board)
+irqreturn_t fmh_gpib_internal_interrupt(struct gpib_board *board)
{
unsigned int status0, status1, status2, ext_status_1, fifo_status;
struct fmh_priv *priv = board->private_data;
@@ -1242,7 +1213,7 @@ irqreturn_t fmh_gpib_internal_interrupt(gpib_board_t *board)
irqreturn_t fmh_gpib_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
unsigned long flags;
irqreturn_t retval;
@@ -1252,7 +1223,7 @@ irqreturn_t fmh_gpib_interrupt(int irq, void *arg)
return retval;
}
-static int fmh_gpib_allocate_private(gpib_board_t *board)
+static int fmh_gpib_allocate_private(struct gpib_board *board)
{
struct fmh_priv *priv;
@@ -1269,7 +1240,7 @@ static int fmh_gpib_allocate_private(gpib_board_t *board)
return 0;
}
-static void fmh_gpib_generic_detach(gpib_board_t *board)
+static void fmh_gpib_generic_detach(struct gpib_board *board)
{
if (board->private_data) {
struct fmh_priv *e_priv = board->private_data;
@@ -1283,7 +1254,7 @@ static void fmh_gpib_generic_detach(gpib_board_t *board)
}
// generic part of attach functions
-static int fmh_gpib_generic_attach(gpib_board_t *board)
+static int fmh_gpib_generic_attach(struct gpib_board *board)
{
struct fmh_priv *e_priv;
struct nec7210_priv *nec_priv;
@@ -1303,7 +1274,7 @@ static int fmh_gpib_generic_attach(gpib_board_t *board)
return 0;
}
-static int fmh_gpib_config_dma(gpib_board_t *board, int output)
+static int fmh_gpib_config_dma(struct gpib_board *board, int output)
{
struct fmh_priv *e_priv = board->private_data;
struct dma_slave_config config;
@@ -1333,7 +1304,7 @@ static int fmh_gpib_config_dma(gpib_board_t *board, int output)
return dmaengine_slave_config(e_priv->dma_channel, &config);
}
-static int fmh_gpib_init(struct fmh_priv *e_priv, gpib_board_t *board, int handshake_mode)
+static int fmh_gpib_init(struct fmh_priv *e_priv, struct gpib_board *board, int handshake_mode)
{
struct nec7210_priv *nec_priv = &e_priv->nec7210_priv;
unsigned long flags;
@@ -1376,11 +1347,11 @@ static int fmh_gpib_device_match(struct device *dev, const void *data)
if (config->serial_number)
return 0;
- dev_notice(dev, "matched: %s\n", of_node_full_name(dev_of_node((dev))));
+ dev_dbg(dev, "matched: %s\n", of_node_full_name(dev_of_node((dev))));
return 1;
}
-static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *config,
+static int fmh_gpib_attach_impl(struct gpib_board *board, const gpib_board_config_t *config,
unsigned int handshake_mode, int acquire_dma)
{
struct fmh_priv *e_priv;
@@ -1393,7 +1364,7 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
board->dev = driver_find_device(&fmh_gpib_platform_driver.driver,
NULL, (const void *)config, &fmh_gpib_device_match);
if (!board->dev) {
- pr_err("No matching fmh_gpib_core device was found, attach failed.");
+ dev_err(board->gpib_dev, "No matching fmh_gpib_core device was found, attach failed.");
return -ENODEV;
}
// currently only used to mark the device as already attached
@@ -1409,7 +1380,7 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpib_control_status");
if (!res) {
- dev_err(board->dev, "Unable to locate mmio resource for cb7210 gpib\n");
+ dev_err(board->dev, "Unable to locate mmio resource\n");
return -ENODEV;
}
@@ -1422,13 +1393,13 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
e_priv->gpib_iomem_res = res;
nec_priv->mmiobase = ioremap(e_priv->gpib_iomem_res->start,
- resource_size(e_priv->gpib_iomem_res));
+ resource_size(e_priv->gpib_iomem_res));
if (!nec_priv->mmiobase) {
- dev_err(board->dev, "Could not map I/O memory for gpib\n");
+ dev_err(board->dev, "Could not map I/O memory\n");
return -ENOMEM;
}
- dev_info(board->dev, "iobase %pr remapped to %p\n",
- e_priv->gpib_iomem_res, nec_priv->mmiobase);
+ dev_dbg(board->dev, "iobase %pr remapped to %p\n",
+ e_priv->gpib_iomem_res, nec_priv->mmiobase);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma_fifos");
if (!res) {
@@ -1448,14 +1419,13 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
dev_err(board->dev, "Could not map I/O memory for fifos\n");
return -ENOMEM;
}
- dev_info(board->dev, "dma fifos 0x%lx remapped to %p, length=%ld\n",
- (unsigned long)e_priv->dma_port_res->start, e_priv->fifo_base,
- (unsigned long)resource_size(e_priv->dma_port_res));
+ dev_dbg(board->dev, "dma fifos 0x%lx remapped to %p, length=%ld\n",
+ (unsigned long)e_priv->dma_port_res->start, e_priv->fifo_base,
+ (unsigned long)resource_size(e_priv->dma_port_res));
irq = platform_get_irq(pdev, 0);
- pr_info("gpib: irq %d\n", irq);
if (irq < 0) {
- dev_err(board->dev, "fmh_gpib_gpib: request for IRQ failed\n");
+ dev_err(board->dev, "request for IRQ failed\n");
return -EBUSY;
}
retval = request_irq(irq, fmh_gpib_interrupt, IRQF_SHARED, pdev->name, board);
@@ -1484,17 +1454,17 @@ static int fmh_gpib_attach_impl(gpib_board_t *board, const gpib_board_config_t *
return fmh_gpib_init(e_priv, board, handshake_mode);
}
-int fmh_gpib_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config)
+int fmh_gpib_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config)
{
return fmh_gpib_attach_impl(board, config, HR_HLDA, 0);
}
-int fmh_gpib_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config)
+int fmh_gpib_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config)
{
return fmh_gpib_attach_impl(board, config, HR_HLDE, 1);
}
-void fmh_gpib_detach(gpib_board_t *board)
+void fmh_gpib_detach(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1527,7 +1497,7 @@ void fmh_gpib_detach(gpib_board_t *board)
fmh_gpib_generic_detach(board);
}
-static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config_t *config,
+static int fmh_gpib_pci_attach_impl(struct gpib_board *board, const gpib_board_config_t *config,
unsigned int handshake_mode)
{
struct fmh_priv *e_priv;
@@ -1546,7 +1516,7 @@ static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config
pci_device = gpib_pci_get_device(config, BOGUS_PCI_VENDOR_ID_FLUKE,
BOGUS_PCI_DEVICE_ID_FLUKE_BLADERUNNER, NULL);
if (!pci_device) {
- pr_err("No matching fmh_gpib_core pci device was found, attach failed.");
+ dev_err(board->gpib_dev, "No matching fmh_gpib_core pci device was found, attach failed.");
return -ENODEV;
}
board->dev = &pci_device->dev;
@@ -1563,34 +1533,32 @@ static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config
return -EIO;
}
e_priv->gpib_iomem_res = &pci_device->resource[gpib_control_status_pci_resource_index];
- e_priv->dma_port_res = &pci_device->resource[gpib_fifo_pci_resource_index];
+ e_priv->dma_port_res = &pci_device->resource[gpib_fifo_pci_resource_index];
nec_priv->mmiobase = ioremap(pci_resource_start(pci_device,
- gpib_control_status_pci_resource_index),
- pci_resource_len(pci_device,
- gpib_control_status_pci_resource_index));
- dev_info(board->dev, "base address for gpib control/status registers remapped to 0x%p\n",
- nec_priv->mmiobase);
+ gpib_control_status_pci_resource_index),
+ pci_resource_len(pci_device,
+ gpib_control_status_pci_resource_index));
+ dev_dbg(board->dev, "base address for gpib control/status registers remapped to 0x%p\n",
+ nec_priv->mmiobase);
if (e_priv->dma_port_res->flags & IORESOURCE_MEM) {
e_priv->fifo_base = ioremap(pci_resource_start(pci_device,
gpib_fifo_pci_resource_index),
pci_resource_len(pci_device,
gpib_fifo_pci_resource_index));
- dev_info(board->dev, "base address for gpib fifo registers remapped to 0x%p\n",
- e_priv->fifo_base);
+ dev_dbg(board->dev, "base address for gpib fifo registers remapped to 0x%p\n",
+ e_priv->fifo_base);
} else {
e_priv->fifo_base = NULL;
- dev_info(board->dev, "hardware has no gpib fifo registers.\n");
+ dev_dbg(board->dev, "hardware has no gpib fifo registers.\n");
}
if (pci_device->irq) {
retval = request_irq(pci_device->irq, fmh_gpib_interrupt, IRQF_SHARED,
KBUILD_MODNAME, board);
if (retval) {
- dev_err(board->dev,
- "cannot register interrupt handler err=%d\n",
- retval);
+ dev_err(board->dev, "cannot register interrupt handler err=%d\n", retval);
return retval;
}
}
@@ -1602,12 +1570,12 @@ static int fmh_gpib_pci_attach_impl(gpib_board_t *board, const gpib_board_config
return fmh_gpib_init(e_priv, board, handshake_mode);
}
-int fmh_gpib_pci_attach_holdoff_all(gpib_board_t *board, const gpib_board_config_t *config)
+int fmh_gpib_pci_attach_holdoff_all(struct gpib_board *board, const gpib_board_config_t *config)
{
return fmh_gpib_pci_attach_impl(board, config, HR_HLDA);
}
-int fmh_gpib_pci_attach_holdoff_end(gpib_board_t *board, const gpib_board_config_t *config)
+int fmh_gpib_pci_attach_holdoff_end(struct gpib_board *board, const gpib_board_config_t *config)
{
int retval;
struct fmh_priv *e_priv;
@@ -1615,13 +1583,13 @@ int fmh_gpib_pci_attach_holdoff_end(gpib_board_t *board, const gpib_board_config
retval = fmh_gpib_pci_attach_impl(board, config, HR_HLDE);
e_priv = board->private_data;
if (retval == 0 && e_priv && e_priv->supports_fifo_interrupts == 0) {
- pr_err("fmh_gpib: your fmh_gpib_core does not appear to support fifo interrupts. Try the fmh_gpib_pci_unaccel board type instead.");
+ dev_err(board->gpib_dev, "your fmh_gpib_core does not appear to support fifo interrupts. Try the fmh_gpib_pci_unaccel board type instead.");
return -EIO;
}
return retval;
}
-void fmh_gpib_pci_detach(gpib_board_t *board)
+void fmh_gpib_pci_detach(struct gpib_board *board)
{
struct fmh_priv *e_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1662,7 +1630,7 @@ MODULE_DEVICE_TABLE(of, fmh_gpib_of_match);
static struct platform_driver fmh_gpib_platform_driver = {
.driver = {
- .name = "fmh_gpib",
+ .name = DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = fmh_gpib_of_match,
},
@@ -1681,7 +1649,7 @@ static const struct pci_device_id fmh_gpib_pci_match[] = {
MODULE_DEVICE_TABLE(pci, fmh_gpib_pci_match);
static struct pci_driver fmh_gpib_pci_driver = {
- .name = "fmh_gpib",
+ .name = DRV_NAME,
.id_table = fmh_gpib_pci_match,
.probe = &fmh_gpib_pci_probe
};
@@ -1692,37 +1660,37 @@ static int __init fmh_gpib_init_module(void)
result = platform_driver_register(&fmh_gpib_platform_driver);
if (result) {
- pr_err("fmh_gpib: platform_driver_register failed: error = %d\n", result);
+ pr_err("platform_driver_register failed: error = %d\n", result);
return result;
}
result = pci_register_driver(&fmh_gpib_pci_driver);
if (result) {
- pr_err("fmh_gpib: pci_register_driver failed: error = %d\n", result);
+ pr_err("pci_register_driver failed: error = %d\n", result);
goto err_pci_driver;
}
result = gpib_register_driver(&fmh_gpib_unaccel_interface, THIS_MODULE);
if (result) {
- pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_unaccel;
}
result = gpib_register_driver(&fmh_gpib_interface, THIS_MODULE);
if (result) {
- pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_interface;
}
result = gpib_register_driver(&fmh_gpib_pci_unaccel_interface, THIS_MODULE);
if (result) {
- pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pci_unaccel;
}
result = gpib_register_driver(&fmh_gpib_pci_interface, THIS_MODULE);
if (result) {
- pr_err("fmh_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pci;
}
diff --git a/drivers/staging/gpib/gpio/gpib_bitbang.c b/drivers/staging/gpib/gpio/gpib_bitbang.c
index 828c99ea613f..86bdd381472a 100644
--- a/drivers/staging/gpib/gpio/gpib_bitbang.c
+++ b/drivers/staging/gpib/gpio/gpib_bitbang.c
@@ -25,6 +25,8 @@
* device support (non-master operation)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
#define NAME KBUILD_MODNAME
#define ENABLE_IRQ(IRQ, TYPE) irq_set_irq_type(IRQ, TYPE)
@@ -41,7 +43,7 @@
*/
#define dbg_printk(level, frm, ...) \
do { if (debug >= (level)) \
- pr_info("%s:%s - " frm, NAME, __func__, ## __VA_ARGS__); } \
+ dev_dbg(board->gpib_dev, frm, ## __VA_ARGS__); } \
while (0)
#define LINVAL gpiod_get_value(DAV), \
@@ -316,13 +318,14 @@ struct bb_priv {
};
static inline long usec_diff(struct timespec64 *a, struct timespec64 *b);
-static void bb_buffer_print(unsigned char *buffer, size_t length, int cmd, int eoi);
+static void bb_buffer_print(struct gpib_board *board, unsigned char *buffer, size_t length,
+ int cmd, int eoi);
static void set_data_lines(u8 byte);
static u8 get_data_lines(void);
static void set_data_lines_input(void);
static void set_data_lines_output(void);
static inline int check_for_eos(struct bb_priv *priv, uint8_t byte);
-static void set_atn(struct bb_priv *priv, int atn_asserted);
+static void set_atn(struct gpib_board *board, int atn_asserted);
static inline void SET_DIR_WRITE(struct bb_priv *priv);
static inline void SET_DIR_READ(struct bb_priv *priv);
@@ -334,11 +337,7 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB helper functions for bitbanging I/O");
/**** global variables ****/
-#ifdef CONFIG_GPIB_DEBUG
-static int debug = 1;
-#else
static int debug;
-#endif
module_param(debug, int, 0644);
static char printable(char x)
@@ -354,7 +353,7 @@ static char printable(char x)
* *
***************************************************************************/
-static int bb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int bb_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
struct bb_priv *priv = board->private_data;
@@ -426,7 +425,7 @@ read_end:
static irqreturn_t bb_DAV_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct bb_priv *priv = board->private_data;
int val;
unsigned long flags;
@@ -492,7 +491,7 @@ dav_exit:
* *
***************************************************************************/
-static int bb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int bb_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
unsigned long flags;
@@ -508,7 +507,7 @@ static int bb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
board, mutex_is_locked(&board->user_mutex), length);
if (debug > 1)
- bb_buffer_print(buffer, length, priv->cmd, send_eoi);
+ bb_buffer_print(board, buffer, length, priv->cmd, send_eoi);
priv->count = 0;
priv->phase = 300;
@@ -550,7 +549,6 @@ static int bb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
dbg_printk(1, "timeout after %zu/%zu at %d " LINFMT " eoi: %d\n",
priv->w_cnt, length, priv->phase, LINVAL, send_eoi);
} else {
- // dbg_printk(1,"written %zu\n", priv->w_cnt);
retval = priv->w_cnt;
}
} else {
@@ -582,7 +580,7 @@ write_end:
static irqreturn_t bb_NRFD_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct bb_priv *priv = board->private_data;
unsigned long flags;
int nrfd;
@@ -655,7 +653,7 @@ nrfd_exit:
static irqreturn_t bb_NDAC_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct bb_priv *priv = board->private_data;
unsigned long flags;
int ndac;
@@ -716,7 +714,7 @@ ndac_exit:
static irqreturn_t bb_SRQ_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
int val = gpiod_get_value(SRQ);
@@ -730,7 +728,7 @@ static irqreturn_t bb_SRQ_interrupt(int irq, void *arg)
return IRQ_HANDLED;
}
-static int bb_command(gpib_board_t *board, uint8_t *buffer,
+static int bb_command(struct gpib_board *board, uint8_t *buffer,
size_t length, size_t *bytes_written)
{
size_t ret;
@@ -811,7 +809,8 @@ static char *cmd_string[32] = {
"CFE" // 0x1f
};
-static void bb_buffer_print(unsigned char *buffer, size_t length, int cmd, int eoi)
+static void bb_buffer_print(struct gpib_board *board, unsigned char *buffer, size_t length,
+ int cmd, int eoi)
{
int i;
@@ -843,11 +842,13 @@ static void bb_buffer_print(unsigned char *buffer, size_t length, int cmd, int e
* STATUS Management *
* *
***************************************************************************/
-static void set_atn(struct bb_priv *priv, int atn_asserted)
+static void set_atn(struct gpib_board *board, int atn_asserted)
{
+ struct bb_priv *priv = board->private_data;
+
if (priv->listener_state != listener_idle &&
priv->talker_state != talker_idle) {
- dbg_printk(0, "listener/talker state machine conflict\n");
+ dev_err(board->gpib_dev, "listener/talker state machine conflict\n");
}
if (atn_asserted) {
if (priv->listener_state == listener_active)
@@ -866,22 +867,22 @@ static void set_atn(struct bb_priv *priv, int atn_asserted)
priv->atn_asserted = atn_asserted;
}
-static int bb_take_control(gpib_board_t *board, int synchronous)
+static int bb_take_control(struct gpib_board *board, int synchronous)
{
dbg_printk(2, "%d\n", synchronous);
- set_atn(board->private_data, 1);
+ set_atn(board, 1);
set_bit(CIC_NUM, &board->status);
return 0;
}
-static int bb_go_to_standby(gpib_board_t *board)
+static int bb_go_to_standby(struct gpib_board *board)
{
dbg_printk(2, "\n");
- set_atn(board->private_data, 0);
+ set_atn(board, 0);
return 0;
}
-static void bb_request_system_control(gpib_board_t *board, int request_control)
+static void bb_request_system_control(struct gpib_board *board, int request_control)
{
dbg_printk(2, "%d\n", request_control);
if (request_control) {
@@ -893,7 +894,7 @@ static void bb_request_system_control(gpib_board_t *board, int request_control)
}
}
-static void bb_interface_clear(gpib_board_t *board, int assert)
+static void bb_interface_clear(struct gpib_board *board, int assert)
{
struct bb_priv *priv = board->private_data;
@@ -907,7 +908,7 @@ static void bb_interface_clear(gpib_board_t *board, int assert)
}
}
-static void bb_remote_enable(gpib_board_t *board, int enable)
+static void bb_remote_enable(struct gpib_board *board, int enable)
{
dbg_printk(2, "%d\n", enable);
if (enable) {
@@ -919,7 +920,7 @@ static void bb_remote_enable(gpib_board_t *board, int enable)
}
}
-static int bb_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int bb_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct bb_priv *priv = board->private_data;
@@ -932,7 +933,7 @@ static int bb_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bi
return 0;
}
-static void bb_disable_eos(gpib_board_t *board)
+static void bb_disable_eos(struct gpib_board *board)
{
struct bb_priv *priv = board->private_data;
@@ -940,7 +941,7 @@ static void bb_disable_eos(gpib_board_t *board)
priv->eos_flags &= ~REOS;
}
-static unsigned int bb_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int bb_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct bb_priv *priv = board->private_data;
@@ -971,14 +972,14 @@ static unsigned int bb_update_status(gpib_board_t *board, unsigned int clear_mas
return board->status;
}
-static int bb_primary_address(gpib_board_t *board, unsigned int address)
+static int bb_primary_address(struct gpib_board *board, unsigned int address)
{
dbg_printk(2, "%d\n", address);
board->pad = address;
return 0;
}
-static int bb_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int bb_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
dbg_printk(2, "%d %d\n", address, enable);
if (enable)
@@ -986,33 +987,29 @@ static int bb_secondary_address(gpib_board_t *board, unsigned int address, int e
return 0;
}
-static int bb_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int bb_parallel_poll(struct gpib_board *board, uint8_t *result)
{
- dbg_printk(1, "%s\n", "not implemented");
- return -EPERM;
+ return -ENOENT;
}
-static void bb_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void bb_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
- dbg_printk(1, "%s\n", "not implemented");
}
-static void bb_parallel_poll_response(gpib_board_t *board, int ist)
+static void bb_parallel_poll_response(struct gpib_board *board, int ist)
{
}
-static void bb_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void bb_serial_poll_response(struct gpib_board *board, uint8_t status)
{
- dbg_printk(1, "%s\n", "not implemented");
}
-static uint8_t bb_serial_poll_status(gpib_board_t *board)
+static uint8_t bb_serial_poll_status(struct gpib_board *board)
{
- dbg_printk(1, "%s\n", "not implemented");
- return 0; // -ENOSYS;
+ return 0; // -ENOENT;
}
-static unsigned int bb_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int bb_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct bb_priv *priv = board->private_data;
@@ -1028,33 +1025,30 @@ static unsigned int bb_t1_delay(gpib_board_t *board, unsigned int nano_sec)
return priv->t1_delay;
}
-static void bb_return_to_local(gpib_board_t *board)
+static void bb_return_to_local(struct gpib_board *board)
{
- dbg_printk(1, "%s\n", "not implemented");
}
-static int bb_line_status(const gpib_board_t *board)
+static int bb_line_status(const struct gpib_board *board)
{
- int line_status = ValidALL;
-
-// dbg_printk(1,"\n");
+ int line_status = VALID_ALL;
if (gpiod_get_value(REN) == 0)
- line_status |= BusREN;
+ line_status |= BUS_REN;
if (gpiod_get_value(IFC) == 0)
- line_status |= BusIFC;
+ line_status |= BUS_IFC;
if (gpiod_get_value(NDAC) == 0)
- line_status |= BusNDAC;
+ line_status |= BUS_NDAC;
if (gpiod_get_value(NRFD) == 0)
- line_status |= BusNRFD;
+ line_status |= BUS_NRFD;
if (gpiod_get_value(DAV) == 0)
- line_status |= BusDAV;
+ line_status |= BUS_DAV;
if (gpiod_get_value(EOI) == 0)
- line_status |= BusEOI;
+ line_status |= BUS_EOI;
if (gpiod_get_value(_ATN) == 0)
- line_status |= BusATN;
+ line_status |= BUS_ATN;
if (gpiod_get_value(SRQ) == 0)
- line_status |= BusSRQ;
+ line_status |= BUS_SRQ;
dbg_printk(2, "status lines: %4x\n", line_status);
@@ -1067,7 +1061,7 @@ static int bb_line_status(const gpib_board_t *board)
* *
***************************************************************************/
-static int allocate_private(gpib_board_t *board)
+static int allocate_private(struct gpib_board *board)
{
board->private_data = kzalloc(sizeof(struct bb_priv), GFP_KERNEL);
if (!board->private_data)
@@ -1075,13 +1069,13 @@ static int allocate_private(gpib_board_t *board)
return 0;
}
-static void free_private(gpib_board_t *board)
+static void free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-static int bb_get_irq(gpib_board_t *board, char *name,
+static int bb_get_irq(struct gpib_board *board, char *name,
struct gpio_desc *gpio, int *irq,
irq_handler_t handler, irq_handler_t thread_fn, unsigned long flags)
{
@@ -1091,11 +1085,11 @@ static int bb_get_irq(gpib_board_t *board, char *name,
*irq = gpiod_to_irq(gpio);
dbg_printk(2, "IRQ %s: %d\n", name, *irq);
if (*irq < 0) {
- dbg_printk(0, "gpib: can't get IRQ for %s\n", name);
+ dev_err(board->gpib_dev, "can't get IRQ for %s\n", name);
return -1;
}
if (request_threaded_irq(*irq, handler, thread_fn, flags, name, board)) {
- dbg_printk(0, "gpib: can't request IRQ for %s %d\n", name, *irq);
+ dev_err(board->gpib_dev, "can't request IRQ for %s %d\n", name, *irq);
*irq = 0;
return -1;
}
@@ -1103,7 +1097,7 @@ static int bb_get_irq(gpib_board_t *board, char *name,
return 0;
}
-static void bb_free_irq(gpib_board_t *board, int *irq, char *name)
+static void bb_free_irq(struct gpib_board *board, int *irq, char *name)
{
if (*irq) {
free_irq(*irq, board);
@@ -1124,7 +1118,7 @@ static void release_gpios(void)
}
}
-static int allocate_gpios(gpib_board_t *board)
+static int allocate_gpios(struct gpib_board *board)
{
int j, retval = 0;
bool error = false;
@@ -1163,8 +1157,8 @@ try_again:
gpiod_add_lookup_table(lookup_table);
goto try_again;
}
- dbg_printk(0, "Unable to obtain gpio descriptor for pin %d error %ld\n",
- gpios_vector[j], PTR_ERR(desc));
+ dev_err(board->gpib_dev, "Unable to obtain gpio descriptor for pin %d error %ld\n",
+ gpios_vector[j], PTR_ERR(desc));
error = true;
break;
}
@@ -1182,7 +1176,7 @@ try_again:
return retval;
}
-static void bb_detach(gpib_board_t *board)
+static void bb_detach(struct gpib_board *board)
{
struct bb_priv *priv = board->private_data;
@@ -1212,7 +1206,7 @@ static void bb_detach(gpib_board_t *board)
free_private(board);
}
-static int bb_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int bb_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct bb_priv *priv;
int retval = 0;
@@ -1253,7 +1247,7 @@ static int bb_attach(gpib_board_t *board, const gpib_board_config_t *config)
gpios_vector[&(DC) - &all_descriptors[0]] = -1;
gpios_vector[&(ACT_LED) - &all_descriptors[0]] = -1;
} else {
- dbg_printk(0, "Unrecognized pin mapping.\n");
+ dev_err(board->gpib_dev, "Unrecognized pin map %s\n", pin_map);
goto bb_attach_fail;
}
dbg_printk(0, "Using pin map \"%s\" %s\n", pin_map, (sn7516x) ?
@@ -1344,19 +1338,15 @@ static int __init bb_init_module(void)
int result = gpib_register_driver(&bb_interface, THIS_MODULE);
if (result) {
- pr_err("gpib_bitbang: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
return result;
}
- dbg_printk(0, "module loaded with pin map \"%s\"%s\n",
- pin_map, (sn7516x_used) ? " and SN7516x driver support" : "");
return 0;
}
static void __exit bb_exit_module(void)
{
- dbg_printk(0, "module unloaded!");
-
gpib_unregister_driver(&bb_interface);
}
diff --git a/drivers/staging/gpib/hp_82335/hp82335.c b/drivers/staging/gpib/hp_82335/hp82335.c
index 700d1ba029d2..fd23b1cb80f9 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.c
+++ b/drivers/staging/gpib/hp_82335/hp82335.c
@@ -8,6 +8,10 @@
* implement recovery from bus errors (if necessary)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "hp82335.h"
#include <linux/io.h>
#include <linux/ioport.h>
@@ -20,153 +24,155 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for HP 82335 interface cards");
-static int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config);
-
-static void hp82335_detach(gpib_board_t *board);
+static int hp82335_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static void hp82335_detach(struct gpib_board *board);
+static irqreturn_t hp82335_interrupt(int irq, void *arg);
// wrappers for interface functions
-int hp82335_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+static int hp82335_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int *end, size_t *bytes_read)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_read(board, &priv->tms9914_priv, buffer, length, end, bytes_read);
}
-int hp82335_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int hp82335_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_write(board, &priv->tms9914_priv, buffer, length, send_eoi, bytes_written);
}
-int hp82335_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int hp82335_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+ size_t *bytes_written)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_command(board, &priv->tms9914_priv, buffer, length, bytes_written);
}
-int hp82335_take_control(gpib_board_t *board, int synchronous)
+static int hp82335_take_control(struct gpib_board *board, int synchronous)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_take_control(board, &priv->tms9914_priv, synchronous);
}
-int hp82335_go_to_standby(gpib_board_t *board)
+static int hp82335_go_to_standby(struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_go_to_standby(board, &priv->tms9914_priv);
}
-void hp82335_request_system_control(gpib_board_t *board, int request_control)
+static void hp82335_request_system_control(struct gpib_board *board, int request_control)
{
struct hp82335_priv *priv = board->private_data;
tms9914_request_system_control(board, &priv->tms9914_priv, request_control);
}
-void hp82335_interface_clear(gpib_board_t *board, int assert)
+static void hp82335_interface_clear(struct gpib_board *board, int assert)
{
struct hp82335_priv *priv = board->private_data;
tms9914_interface_clear(board, &priv->tms9914_priv, assert);
}
-void hp82335_remote_enable(gpib_board_t *board, int enable)
+static void hp82335_remote_enable(struct gpib_board *board, int enable)
{
struct hp82335_priv *priv = board->private_data;
tms9914_remote_enable(board, &priv->tms9914_priv, enable);
}
-int hp82335_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int hp82335_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_enable_eos(board, &priv->tms9914_priv, eos_byte, compare_8_bits);
}
-void hp82335_disable_eos(gpib_board_t *board)
+static void hp82335_disable_eos(struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
tms9914_disable_eos(board, &priv->tms9914_priv);
}
-unsigned int hp82335_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int hp82335_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_update_status(board, &priv->tms9914_priv, clear_mask);
}
-int hp82335_primary_address(gpib_board_t *board, unsigned int address)
+static int hp82335_primary_address(struct gpib_board *board, unsigned int address)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_primary_address(board, &priv->tms9914_priv, address);
}
-int hp82335_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int hp82335_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_secondary_address(board, &priv->tms9914_priv, address, enable);
}
-int hp82335_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int hp82335_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_parallel_poll(board, &priv->tms9914_priv, result);
}
-void hp82335_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void hp82335_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct hp82335_priv *priv = board->private_data;
tms9914_parallel_poll_configure(board, &priv->tms9914_priv, config);
}
-void hp82335_parallel_poll_response(gpib_board_t *board, int ist)
+static void hp82335_parallel_poll_response(struct gpib_board *board, int ist)
{
struct hp82335_priv *priv = board->private_data;
tms9914_parallel_poll_response(board, &priv->tms9914_priv, ist);
}
-void hp82335_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void hp82335_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct hp82335_priv *priv = board->private_data;
tms9914_serial_poll_response(board, &priv->tms9914_priv, status);
}
-static uint8_t hp82335_serial_poll_status(gpib_board_t *board)
+static uint8_t hp82335_serial_poll_status(struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_serial_poll_status(board, &priv->tms9914_priv);
}
-static int hp82335_line_status(const gpib_board_t *board)
+static int hp82335_line_status(const struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_line_status(board, &priv->tms9914_priv);
}
-static unsigned int hp82335_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int hp82335_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct hp82335_priv *priv = board->private_data;
return tms9914_t1_delay(board, &priv->tms9914_priv, nano_sec);
}
-void hp82335_return_to_local(gpib_board_t *board)
+static void hp82335_return_to_local(struct gpib_board *board)
{
struct hp82335_priv *priv = board->private_data;
@@ -201,7 +207,7 @@ static gpib_interface_t hp82335_interface = {
.return_to_local = hp82335_return_to_local,
};
-int hp82335_allocate_private(gpib_board_t *board)
+static int hp82335_allocate_private(struct gpib_board *board)
{
board->private_data = kzalloc(sizeof(struct hp82335_priv), GFP_KERNEL);
if (!board->private_data)
@@ -209,7 +215,7 @@ int hp82335_allocate_private(gpib_board_t *board)
return 0;
}
-void hp82335_free_private(gpib_board_t *board)
+static void hp82335_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
@@ -237,7 +243,7 @@ static void hp82335_clear_interrupt(struct hp82335_priv *hp_priv)
writeb(0, tms_priv->mmiobase + HPREG_INTR_CLEAR);
}
-int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int hp82335_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct hp82335_priv *hp_priv;
struct tms9914_priv *tms_priv;
@@ -272,26 +278,23 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
case 0xfc000:
break;
default:
- pr_err("hp82335: invalid base io address 0x%u\n", config->ibbase);
+ dev_err(board->gpib_dev, "invalid base io address 0x%x\n", config->ibbase);
return -EINVAL;
}
if (!request_mem_region(upper_iomem_base, hp82335_upper_iomem_size, "hp82335")) {
- pr_err("hp82335: failed to allocate io memory region 0x%lx-0x%lx\n",
- upper_iomem_base, upper_iomem_base + hp82335_upper_iomem_size - 1);
+ dev_err(board->gpib_dev, "failed to allocate io memory region 0x%lx-0x%lx\n",
+ upper_iomem_base, upper_iomem_base + hp82335_upper_iomem_size - 1);
return -EBUSY;
}
hp_priv->raw_iobase = upper_iomem_base;
tms_priv->mmiobase = ioremap(upper_iomem_base, hp82335_upper_iomem_size);
- pr_info("hp82335: upper half of 82335 iomem region 0x%lx remapped to 0x%p\n",
- hp_priv->raw_iobase, tms_priv->mmiobase);
- retval = request_irq(config->ibirq, hp82335_interrupt, 0, "hp82335", board);
+ retval = request_irq(config->ibirq, hp82335_interrupt, 0, DRV_NAME, board);
if (retval) {
- pr_err("hp82335: can't request IRQ %d\n", config->ibirq);
+ dev_err(board->gpib_dev, "can't request IRQ %d\n", config->ibirq);
return retval;
}
hp_priv->irq = config->ibirq;
- pr_info("hp82335: IRQ %d\n", config->ibirq);
tms9914_board_reset(tms_priv);
@@ -304,7 +307,7 @@ int hp82335_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void hp82335_detach(gpib_board_t *board)
+static void hp82335_detach(struct gpib_board *board)
{
struct hp82335_priv *hp_priv = board->private_data;
struct tms9914_priv *tms_priv;
@@ -329,7 +332,7 @@ static int __init hp82335_init_module(void)
int result = gpib_register_driver(&hp82335_interface, THIS_MODULE);
if (result) {
- pr_err("hp82335: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
return result;
}
@@ -348,10 +351,10 @@ module_exit(hp82335_exit_module);
* GPIB interrupt service routines
*/
-irqreturn_t hp82335_interrupt(int irq, void *arg)
+static irqreturn_t hp82335_interrupt(int irq, void *arg)
{
int status1, status2;
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct hp82335_priv *priv = board->private_data;
unsigned long flags;
irqreturn_t retval;
diff --git a/drivers/staging/gpib/hp_82335/hp82335.h b/drivers/staging/gpib/hp_82335/hp82335.h
index 4b185d7c5188..0c252a712ec9 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.h
+++ b/drivers/staging/gpib/hp_82335/hp82335.h
@@ -17,36 +17,6 @@ struct hp82335_priv {
unsigned long raw_iobase;
};
-// interface functions
-int hp82335_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
-int hp82335_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-int hp82335_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int hp82335_take_control(gpib_board_t *board, int synchronous);
-int hp82335_go_to_standby(gpib_board_t *board);
-void hp82335_request_system_control(gpib_board_t *board, int request_control);
-void hp82335_interface_clear(gpib_board_t *board, int assert);
-void hp82335_remote_enable(gpib_board_t *board, int enable);
-int hp82335_enable_eos(gpib_board_t *board, uint8_t eos_byte, int
- compare_8_bits);
-void hp82335_disable_eos(gpib_board_t *board);
-unsigned int hp82335_update_status(gpib_board_t *board, unsigned int clear_mask);
-int hp82335_primary_address(gpib_board_t *board, unsigned int address);
-int hp82335_secondary_address(gpib_board_t *board, unsigned int address, int
- enable);
-int hp82335_parallel_poll(gpib_board_t *board, uint8_t *result);
-void hp82335_parallel_poll_configure(gpib_board_t *board, uint8_t config);
-void hp82335_parallel_poll_response(gpib_board_t *board, int ist);
-void hp82335_serial_poll_response(gpib_board_t *board, uint8_t status);
-void hp82335_return_to_local(gpib_board_t *board);
-
-// interrupt service routines
-irqreturn_t hp82335_interrupt(int irq, void *arg);
-
-// utility functions
-int hp82335_allocate_private(gpib_board_t *board);
-void hp82335_free_private(gpib_board_t *board);
-
// size of io memory region used
static const int hp82335_rom_size = 0x2000;
static const int hp82335_upper_iomem_size = 0x2000;
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.c b/drivers/staging/gpib/hp_82341/hp_82341.c
index 0ddae295912f..f52e673dc869 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.c
+++ b/drivers/staging/gpib/hp_82341/hp_82341.c
@@ -6,6 +6,10 @@
* copyright : (C) 2002, 2005 by Frank Mori Hess *
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "hp_82341.h"
#include <linux/delay.h>
#include <linux/ioport.h>
@@ -16,9 +20,17 @@
#include <linux/isapnp.h>
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("GPIB driver for hp 82341a/b/c/d boards");
+
+static unsigned short read_and_clear_event_status(struct gpib_board *board);
+static void set_transfer_counter(struct hp_82341_priv *hp_priv, int count);
+static int read_transfer_counter(struct hp_82341_priv *hp_priv);
+static int hp_82341_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written);
+static irqreturn_t hp_82341_interrupt(int irq, void *arg);
-int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read)
+static int hp_82341_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ size_t *bytes_read)
{
struct hp_82341_priv *hp_priv = board->private_data;
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
@@ -50,7 +62,7 @@ int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
retval = tms9914_read(board, tms_priv, buffer, 1, end, &num_bytes);
*bytes_read += num_bytes;
if (retval < 0)
- pr_err("tms9914_read failed retval=%i\n", retval);
+ dev_err(board->gpib_dev, "tms9914_read failed retval=%i\n", retval);
if (retval < 0 || *end)
return retval;
++buffer;
@@ -86,7 +98,6 @@ int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
test_bit(DEV_CLEAR_BN, &tms_priv->state) ||
test_bit(TIMO_NUM, &board->status));
if (retval) {
- pr_warn("%s: read wait interrupted\n", __func__);
retval = -ERESTARTSYS;
break;
}
@@ -111,12 +122,10 @@ int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
tms_priv->holdoff_active = 1;
}
if (test_bit(TIMO_NUM, &board->status)) {
- pr_debug("%s: minor %i: read timed out\n", __FILE__, board->minor);
retval = -ETIMEDOUT;
break;
}
if (test_bit(DEV_CLEAR_BN, &tms_priv->state)) {
- pr_warn("%s: device clear interrupted read\n", __FILE__);
retval = -EINTR;
break;
}
@@ -138,7 +147,7 @@ int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
return 0;
}
-static int restart_write_fifo(gpib_board_t *board, struct hp_82341_priv *hp_priv)
+static int restart_write_fifo(struct gpib_board *board, struct hp_82341_priv *hp_priv)
{
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
@@ -149,7 +158,7 @@ static int restart_write_fifo(gpib_board_t *board, struct hp_82341_priv *hp_priv
//restart doesn't work if data holdoff is in effect
status = tms9914_line_status(board, tms_priv);
- if ((status & BusNRFD) == 0) {
+ if ((status & BUS_NRFD) == 0) {
outb(RESTART_STREAM_BIT, hp_priv->iobase[0] + STREAM_STATUS_REG);
return 0;
}
@@ -163,8 +172,8 @@ static int restart_write_fifo(gpib_board_t *board, struct hp_82341_priv *hp_priv
return 0;
}
-int hp_82341_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written)
+static int hp_82341_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int send_eoi, size_t *bytes_written)
{
struct hp_82341_priv *hp_priv = board->private_data;
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
@@ -204,7 +213,7 @@ int hp_82341_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
outb(ENABLE_TI_BUFFER_BIT, hp_priv->iobase[3] + BUFFER_CONTROL_REG);
retval = restart_write_fifo(board, hp_priv);
if (retval < 0) {
- pr_err("hp82341: failed to restart write stream\n");
+ dev_err(board->gpib_dev, "failed to restart write stream\n");
break;
}
retval = wait_event_interruptible(board->wait,
@@ -216,17 +225,14 @@ int hp_82341_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
outb(0, hp_priv->iobase[3] + BUFFER_CONTROL_REG);
*bytes_written += block_size - read_transfer_counter(hp_priv);
if (retval) {
- pr_warn("%s: write wait interrupted\n", __FILE__);
retval = -ERESTARTSYS;
break;
}
if (test_bit(TIMO_NUM, &board->status)) {
- pr_debug("%s: minor %i: write timed out\n", __FILE__, board->minor);
retval = -ETIMEDOUT;
break;
}
if (test_bit(DEV_CLEAR_BN, &tms_priv->state)) {
- pr_warn("%s: device clear interrupted write\n", __FILE__);
retval = -EINTR;
break;
}
@@ -244,48 +250,50 @@ int hp_82341_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
return 0;
}
-static int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config);
+static int hp_82341_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static void hp_82341_detach(gpib_board_t *board);
+static void hp_82341_detach(struct gpib_board *board);
// wrappers for interface functions
-int hp_82341_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+static int hp_82341_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ size_t *bytes_read)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_read(board, &priv->tms9914_priv, buffer, length, end, bytes_read);
}
-int hp_82341_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int hp_82341_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_write(board, &priv->tms9914_priv, buffer, length, send_eoi, bytes_written);
}
-int hp_82341_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int hp_82341_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+ size_t *bytes_written)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_command(board, &priv->tms9914_priv, buffer, length, bytes_written);
}
-int hp_82341_take_control(gpib_board_t *board, int synchronous)
+static int hp_82341_take_control(struct gpib_board *board, int synchronous)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_take_control(board, &priv->tms9914_priv, synchronous);
}
-int hp_82341_go_to_standby(gpib_board_t *board)
+static int hp_82341_go_to_standby(struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_go_to_standby(board, &priv->tms9914_priv);
}
-void hp_82341_request_system_control(gpib_board_t *board, int request_control)
+static void hp_82341_request_system_control(struct gpib_board *board, int request_control)
{
struct hp_82341_priv *priv = board->private_data;
@@ -297,105 +305,105 @@ void hp_82341_request_system_control(gpib_board_t *board, int request_control)
tms9914_request_system_control(board, &priv->tms9914_priv, request_control);
}
-void hp_82341_interface_clear(gpib_board_t *board, int assert)
+static void hp_82341_interface_clear(struct gpib_board *board, int assert)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_interface_clear(board, &priv->tms9914_priv, assert);
}
-void hp_82341_remote_enable(gpib_board_t *board, int enable)
+static void hp_82341_remote_enable(struct gpib_board *board, int enable)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_remote_enable(board, &priv->tms9914_priv, enable);
}
-int hp_82341_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int hp_82341_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_enable_eos(board, &priv->tms9914_priv, eos_byte, compare_8_bits);
}
-void hp_82341_disable_eos(gpib_board_t *board)
+static void hp_82341_disable_eos(struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_disable_eos(board, &priv->tms9914_priv);
}
-unsigned int hp_82341_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int hp_82341_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_update_status(board, &priv->tms9914_priv, clear_mask);
}
-int hp_82341_primary_address(gpib_board_t *board, unsigned int address)
+static int hp_82341_primary_address(struct gpib_board *board, unsigned int address)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_primary_address(board, &priv->tms9914_priv, address);
}
-int hp_82341_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int hp_82341_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_secondary_address(board, &priv->tms9914_priv, address, enable);
}
-int hp_82341_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int hp_82341_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_parallel_poll(board, &priv->tms9914_priv, result);
}
-void hp_82341_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void hp_82341_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_parallel_poll_configure(board, &priv->tms9914_priv, config);
}
-void hp_82341_parallel_poll_response(gpib_board_t *board, int ist)
+static void hp_82341_parallel_poll_response(struct gpib_board *board, int ist)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_parallel_poll_response(board, &priv->tms9914_priv, ist);
}
-void hp_82341_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void hp_82341_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct hp_82341_priv *priv = board->private_data;
tms9914_serial_poll_response(board, &priv->tms9914_priv, status);
}
-static uint8_t hp_82341_serial_poll_status(gpib_board_t *board)
+static uint8_t hp_82341_serial_poll_status(struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_serial_poll_status(board, &priv->tms9914_priv);
}
-static int hp_82341_line_status(const gpib_board_t *board)
+static int hp_82341_line_status(const struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_line_status(board, &priv->tms9914_priv);
}
-static unsigned int hp_82341_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int hp_82341_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct hp_82341_priv *priv = board->private_data;
return tms9914_t1_delay(board, &priv->tms9914_priv, nano_sec);
}
-void hp_82341_return_to_local(gpib_board_t *board)
+static void hp_82341_return_to_local(struct gpib_board *board)
{
struct hp_82341_priv *priv = board->private_data;
@@ -457,7 +465,7 @@ static gpib_interface_t hp_82341_interface = {
.return_to_local = hp_82341_return_to_local,
};
-int hp_82341_allocate_private(gpib_board_t *board)
+static int hp_82341_allocate_private(struct gpib_board *board)
{
board->private_data = kzalloc(sizeof(struct hp_82341_priv), GFP_KERNEL);
if (!board->private_data)
@@ -465,7 +473,7 @@ int hp_82341_allocate_private(gpib_board_t *board)
return 0;
}
-void hp_82341_free_private(gpib_board_t *board)
+static void hp_82341_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
@@ -486,21 +494,21 @@ static int hp_82341_find_isapnp_board(struct pnp_dev **dev)
*dev = pnp_find_dev(NULL, ISAPNP_VENDOR('H', 'W', 'P'),
ISAPNP_FUNCTION(0x1411), NULL);
if (!*dev || !(*dev)->card) {
- pr_err("hp_82341: failed to find isapnp board\n");
+ pr_err("failed to find isapnp board\n");
return -ENODEV;
}
if (pnp_device_attach(*dev) < 0) {
- pr_err("hp_82341: board already active, skipping\n");
+ pr_err("board already active, skipping\n");
return -EBUSY;
}
if (pnp_activate_dev(*dev) < 0) {
pnp_device_detach(*dev);
- pr_err("hp_82341: failed to activate() atgpib/tnt, aborting\n");
+ pr_err("failed to activate(), aborting\n");
return -EAGAIN;
}
if (!pnp_port_valid(*dev, 0) || !pnp_irq_valid(*dev, 0)) {
pnp_device_detach(*dev);
- pr_err("hp_82341: invalid port or irq for atgpib/tnt, aborting\n");
+ pr_err("invalid port or irq, aborting\n");
return -ENOMEM;
}
return 0;
@@ -521,7 +529,7 @@ static int xilinx_ready(struct hp_82341_priv *hp_priv)
else
return 0;
default:
- pr_err("hp_82341: %s: bug! unknown hw_version\n", __func__);
+ pr_err("bug! unknown hw_version\n");
break;
}
return 0;
@@ -541,7 +549,7 @@ static int xilinx_done(struct hp_82341_priv *hp_priv)
else
return 0;
default:
- pr_err("hp_82341: %s: bug! unknown hw_version\n", __func__);
+ pr_err("bug! unknown hw_version\n");
break;
}
return 0;
@@ -562,7 +570,7 @@ static int irq_valid(struct hp_82341_priv *hp_priv, int irq)
case 15:
return 1;
default:
- pr_err("hp_82341: invalid irq=%i for 82341C, irq must be 3, 5, 7, 9, 10, 11, 12, or 15.\n",
+ pr_err("invalid irq=%i for 82341C, irq must be 3, 5, 7, 9, 10, 11, 12, or 15.\n",
irq);
return 0;
}
@@ -570,7 +578,7 @@ static int irq_valid(struct hp_82341_priv *hp_priv, int irq)
case HW_VERSION_82341D:
return 1;
default:
- pr_err("hp_82341: %s: bug! unknown hw_version\n", __func__);
+ pr_err("bug! unknown hw_version\n");
break;
}
return 0;
@@ -592,7 +600,7 @@ static int hp_82341_load_firmware_array(struct hp_82341_priv *hp_priv,
usleep_range(10, 15);
}
if (j == timeout) {
- pr_err("hp_82341: timed out waiting for Xilinx ready.\n");
+ pr_err("timed out waiting for Xilinx ready.\n");
return -ETIMEDOUT;
}
outb(firmware_data[i], hp_priv->iobase[0] + XILINX_DATA_REG);
@@ -605,7 +613,7 @@ static int hp_82341_load_firmware_array(struct hp_82341_priv *hp_priv,
usleep_range(10, 15);
}
if (j == timeout) {
- pr_err("hp_82341: timed out waiting for Xilinx done.\n");
+ pr_err("timed out waiting for Xilinx done.\n");
return -ETIMEDOUT;
}
return 0;
@@ -616,27 +624,27 @@ static int hp_82341_load_firmware(struct hp_82341_priv *hp_priv, const gpib_boar
if (config->init_data_length == 0) {
if (xilinx_done(hp_priv))
return 0;
- pr_err("hp_82341: board needs be initialized with firmware upload.\n"
+ pr_err("board needs be initialized with firmware upload.\n"
"\tUse the --init-data option of gpib_config.\n");
return -EINVAL;
}
switch (hp_priv->hw_version) {
case HW_VERSION_82341C:
if (config->init_data_length != hp_82341c_firmware_length) {
- pr_err("hp_82341: bad firmware length=%i for 82341c (expected %i).\n",
+ pr_err("bad firmware length=%i for 82341c (expected %i).\n",
config->init_data_length, hp_82341c_firmware_length);
return -EINVAL;
}
break;
case HW_VERSION_82341D:
if (config->init_data_length != hp_82341d_firmware_length) {
- pr_err("hp_82341: bad firmware length=%i for 82341d (expected %i).\n",
+ pr_err("bad firmware length=%i for 82341d (expected %i).\n",
config->init_data_length, hp_82341d_firmware_length);
return -EINVAL;
}
break;
default:
- pr_err("hp_82341: %s: bug! unknown hw_version\n", __func__);
+ pr_err("bug! unknown hw_version\n");
break;
}
return hp_82341_load_firmware_array(hp_priv, config->init_data, config->init_data_length);
@@ -678,7 +686,7 @@ static int clear_xilinx(struct hp_82341_priv *hp_priv)
return 0;
}
-int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int hp_82341_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct hp_82341_priv *hp_priv;
struct tms9914_priv *tms_priv;
@@ -714,13 +722,12 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
hp_priv->hw_version = HW_VERSION_82341C;
hp_priv->io_region_offset = 0x400;
}
- pr_info("hp_82341: base io 0x%u\n", iobase);
for (i = 0; i < hp_82341_num_io_regions; ++i) {
start_addr = iobase + i * hp_priv->io_region_offset;
- if (!request_region(start_addr, hp_82341_region_iosize, "hp_82341")) {
- pr_err("hp_82341: failed to allocate io ports 0x%lx-0x%lx\n",
- start_addr,
- start_addr + hp_82341_region_iosize - 1);
+ if (!request_region(start_addr, hp_82341_region_iosize, DRV_NAME)) {
+ dev_err(board->gpib_dev, "failed to allocate io ports 0x%x-0x%x\n",
+ start_addr,
+ start_addr + hp_82341_region_iosize - 1);
return -EIO;
}
hp_priv->iobase[i] = start_addr;
@@ -730,7 +737,7 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
retval = isapnp_cfg_begin(hp_priv->pnp_dev->card->number,
hp_priv->pnp_dev->number);
if (retval < 0) {
- pr_err("hp_82341: isapnp_cfg_begin returned error\n");
+ dev_err(board->gpib_dev, "isapnp_cfg_begin returned error\n");
return retval;
}
isapnp_write_byte(PIO_DIRECTION_REG, HP_82341D_XILINX_READY_BIT |
@@ -746,12 +753,11 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
return retval;
if (irq_valid(hp_priv, irq) == 0)
return -EINVAL;
- if (request_irq(irq, hp_82341_interrupt, 0, "hp_82341", board)) {
- pr_err("hp_82341: failed to allocate IRQ %d\n", irq);
+ if (request_irq(irq, hp_82341_interrupt, 0, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to allocate IRQ %d\n", irq);
return -EIO;
}
hp_priv->irq = irq;
- pr_info("hp_82341: IRQ %d\n", irq);
hp_priv->config_control_bits &= ~IRQ_SELECT_MASK;
hp_priv->config_control_bits |= IRQ_SELECT_BITS(irq);
outb(hp_priv->config_control_bits, hp_priv->iobase[0] + CONFIG_CONTROL_STATUS_REG);
@@ -768,13 +774,11 @@ int hp_82341_attach(gpib_board_t *board, const gpib_board_config_t *config)
hp_priv->iobase[0] + EVENT_STATUS_REG);
tms9914_online(board, tms_priv);
- pr_info("hp_82341: board id %x %x %x %x\n", inb(hp_priv->iobase[1] + ID0_REG),
- inb(hp_priv->iobase[1] + ID1_REG), inb(hp_priv->iobase[2] + ID2_REG),
- inb(hp_priv->iobase[2] + ID3_REG));
+
return 0;
}
-void hp_82341_detach(gpib_board_t *board)
+static void hp_82341_detach(struct gpib_board *board)
{
struct hp_82341_priv *hp_priv = board->private_data;
struct tms9914_priv *tms_priv;
@@ -799,11 +803,14 @@ void hp_82341_detach(gpib_board_t *board)
hp_82341_free_private(board);
}
+#if 0
+/* unused, will be needed when the driver is turned into a pnp_driver */
static const struct pnp_device_id hp_82341_pnp_table[] = {
{.id = "HWP1411"},
{.id = ""}
};
MODULE_DEVICE_TABLE(pnp, hp_82341_pnp_table);
+#endif
static int __init hp_82341_init_module(void)
{
@@ -811,13 +818,13 @@ static int __init hp_82341_init_module(void)
ret = gpib_register_driver(&hp_82341_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("hp_82341: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&hp_82341_interface, THIS_MODULE);
if (ret) {
- pr_err("hp_82341: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
gpib_unregister_driver(&hp_82341_unaccel_interface);
return ret;
}
@@ -837,7 +844,7 @@ module_exit(hp_82341_exit_module);
/*
* GPIB interrupt service routines
*/
-unsigned short read_and_clear_event_status(gpib_board_t *board)
+static unsigned short read_and_clear_event_status(struct gpib_board *board)
{
struct hp_82341_priv *hp_priv = board->private_data;
unsigned long flags;
@@ -850,10 +857,10 @@ unsigned short read_and_clear_event_status(gpib_board_t *board)
return status;
}
-irqreturn_t hp_82341_interrupt(int irq, void *arg)
+static irqreturn_t hp_82341_interrupt(int irq, void *arg)
{
int status1, status2;
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct hp_82341_priv *hp_priv = board->private_data;
struct tms9914_priv *tms_priv = &hp_priv->tms9914_priv;
unsigned long flags;
@@ -862,7 +869,6 @@ irqreturn_t hp_82341_interrupt(int irq, void *arg)
spin_lock_irqsave(&board->spinlock, flags);
event_status = inb(hp_priv->iobase[0] + EVENT_STATUS_REG);
-// printk("hp_82341: interrupt event_status=0x%x\n", event_status);
if (event_status & INTERRUPT_PENDING_EVENT_BIT)
retval = IRQ_HANDLED;
//write-clear status bits
@@ -877,15 +883,12 @@ irqreturn_t hp_82341_interrupt(int irq, void *arg)
status1 = read_byte(tms_priv, ISR0);
status2 = read_byte(tms_priv, ISR1);
tms9914_interrupt_have_status(board, tms_priv, status1, status2);
-/* printk("hp_82341: interrupt status1=0x%x status2=0x%x\n",
- * status1, status2);
- */
}
spin_unlock_irqrestore(&board->spinlock, flags);
return retval;
}
-int read_transfer_counter(struct hp_82341_priv *hp_priv)
+static int read_transfer_counter(struct hp_82341_priv *hp_priv)
{
int lo, mid, value;
@@ -896,7 +899,7 @@ int read_transfer_counter(struct hp_82341_priv *hp_priv)
return value;
}
-void set_transfer_counter(struct hp_82341_priv *hp_priv, int count)
+static void set_transfer_counter(struct hp_82341_priv *hp_priv, int count)
{
int complement = -count;
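
Several hunks above (and the ines pio_read()/ines_write_wait() hunks further down) delete the pr_warn/pr_debug chatter around wait_event_interruptible() and simply map the wakeup reason to an errno; the caller already receives -ERESTARTSYS, -ETIMEDOUT or -EINTR, so the log lines added no information. A condensed sketch of the resulting error handling, with placeholder bit names standing in for the driver's TIMO_NUM and DEV_CLEAR_BN:

	#include <linux/wait.h>
	#include <linux/bitops.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	#define EX_TIMO_BIT		0	/* placeholder for TIMO_NUM */
	#define EX_DEV_CLEAR_BIT	1	/* placeholder for DEV_CLEAR_BN */

	static int example_wait_for_data(wait_queue_head_t *wq, unsigned long *state,
					 bool (*data_ready)(void *ctx), void *ctx)
	{
		if (wait_event_interruptible(*wq,
					     data_ready(ctx) ||
					     test_bit(EX_TIMO_BIT, state) ||
					     test_bit(EX_DEV_CLEAR_BIT, state)))
			return -ERESTARTSYS;	/* interrupted by a signal */
		if (test_bit(EX_TIMO_BIT, state))
			return -ETIMEDOUT;	/* board timeout expired */
		if (test_bit(EX_DEV_CLEAR_BIT, state))
			return -EINTR;		/* device clear aborted the transfer */
		return 0;
	}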
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.h b/drivers/staging/gpib/hp_82341/hp_82341.h
index 0065ebd9747c..370a3d4576eb 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.h
+++ b/drivers/staging/gpib/hp_82341/hp_82341.h
@@ -26,42 +26,6 @@ struct hp_82341_priv {
enum hp_82341_hardware_version hw_version;
};
-
-// interface functions
-int hp_82341_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read);
-int hp_82341_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written);
-int hp_82341_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read);
-int hp_82341_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written);
-int hp_82341_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int hp_82341_take_control(gpib_board_t *board, int synchronous);
-int hp_82341_go_to_standby(gpib_board_t *board);
-void hp_82341_request_system_control(gpib_board_t *board, int request_control);
-void hp_82341_interface_clear(gpib_board_t *board, int assert);
-void hp_82341_remote_enable(gpib_board_t *board, int enable);
-int hp_82341_enable_eos(gpib_board_t *board, uint8_t eos_byte, int
- compare_8_bits);
-void hp_82341_disable_eos(gpib_board_t *board);
-unsigned int hp_82341_update_status(gpib_board_t *board, unsigned int clear_mask);
-int hp_82341_primary_address(gpib_board_t *board, unsigned int address);
-int hp_82341_secondary_address(gpib_board_t *board, unsigned int address, int
- enable);
-int hp_82341_parallel_poll(gpib_board_t *board, uint8_t *result);
-void hp_82341_parallel_poll_configure(gpib_board_t *board, uint8_t config);
-void hp_82341_parallel_poll_response(gpib_board_t *board, int ist);
-void hp_82341_serial_poll_response(gpib_board_t *board, uint8_t status);
-void hp_82341_return_to_local(gpib_board_t *board);
-
-// interrupt service routines
-irqreturn_t hp_82341_interrupt(int irq, void *arg);
-
-// utility functions
-int hp_82341_allocate_private(gpib_board_t *board);
-void hp_82341_free_private(gpib_board_t *board);
-
static const int hp_82341_region_iosize = 0x8;
static const int hp_82341_num_io_regions = 4;
static const int hp_82341_fifo_size = 0xffe;
@@ -199,7 +163,3 @@ enum hp_82341d_pnp_pio_bits {
HP_82341D_LEGACY_MODE_BIT = 0x4,
HP_82341D_NOT_PROG_BIT = 0x8, // clear to reinitialize xilinx
};
-
-unsigned short read_and_clear_event_status(gpib_board_t *board);
-int read_transfer_counter(struct hp_82341_priv *hp_priv);
-void set_transfer_counter(struct hp_82341_priv *hp_priv, int count);
diff --git a/drivers/staging/gpib/include/gpibP.h b/drivers/staging/gpib/include/gpibP.h
index d35fdd391f7e..0c71a038e444 100644
--- a/drivers/staging/gpib/include/gpibP.h
+++ b/drivers/staging/gpib/include/gpibP.h
@@ -26,13 +26,13 @@ struct pci_dev *gpib_pci_get_subsys(const gpib_board_config_t *config, unsigned
unsigned int device_id, unsigned int ss_vendor,
unsigned int ss_device, struct pci_dev *from);
unsigned int num_gpib_events(const gpib_event_queue_t *queue);
-int push_gpib_event(gpib_board_t *board, short event_type);
-int pop_gpib_event(gpib_board_t *board, gpib_event_queue_t *queue, short *event_type);
-int gpib_request_pseudo_irq(gpib_board_t *board, irqreturn_t (*handler)(int, void *));
-void gpib_free_pseudo_irq(gpib_board_t *board);
+int push_gpib_event(struct gpib_board *board, short event_type);
+int pop_gpib_event(struct gpib_board *board, gpib_event_queue_t *queue, short *event_type);
+int gpib_request_pseudo_irq(struct gpib_board *board, irqreturn_t (*handler)(int, void *));
+void gpib_free_pseudo_irq(struct gpib_board *board);
int gpib_match_device_path(struct device *dev, const char *device_path_in);
-extern gpib_board_t board_array[GPIB_MAX_NUM_BOARDS];
+extern struct gpib_board board_array[GPIB_MAX_NUM_BOARDS];
extern struct list_head registered_drivers;
diff --git a/drivers/staging/gpib/include/gpib_proto.h b/drivers/staging/gpib/include/gpib_proto.h
index 1499f954210b..2c7dfc02f517 100644
--- a/drivers/staging/gpib/include/gpib_proto.h
+++ b/drivers/staging/gpib/include/gpib_proto.h
@@ -10,11 +10,11 @@ int ibclose(struct inode *inode, struct file *file);
long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg);
int osInit(void);
void osReset(void);
-void os_start_timer(gpib_board_t *board, unsigned int usec_timeout);
-void os_remove_timer(gpib_board_t *board);
+void os_start_timer(struct gpib_board *board, unsigned int usec_timeout);
+void os_remove_timer(struct gpib_board *board);
void osSendEOI(void);
void osSendEOI(void);
-void init_gpib_board(gpib_board_t *board);
+void init_gpib_board(struct gpib_board *board);
static inline unsigned long usec_to_jiffies(unsigned int usec)
{
unsigned long usec_per_jiffy = 1000000 / HZ;
@@ -22,35 +22,35 @@ static inline unsigned long usec_to_jiffies(unsigned int usec)
return 1 + (usec + usec_per_jiffy - 1) / usec_per_jiffy;
};
-int serial_poll_all(gpib_board_t *board, unsigned int usec_timeout);
+int serial_poll_all(struct gpib_board *board, unsigned int usec_timeout);
void init_gpib_descriptor(gpib_descriptor_t *desc);
-int dvrsp(gpib_board_t *board, unsigned int pad, int sad,
+int dvrsp(struct gpib_board *board, unsigned int pad, int sad,
unsigned int usec_timeout, uint8_t *result);
-int ibAPWait(gpib_board_t *board, int pad);
-int ibAPrsp(gpib_board_t *board, int padsad, char *spb);
-void ibAPE(gpib_board_t *board, int pad, int v);
-int ibcac(gpib_board_t *board, int sync, int fallback_to_async);
-int ibcmd(gpib_board_t *board, uint8_t *buf, size_t length, size_t *bytes_written);
-int ibgts(gpib_board_t *board);
-int ibonline(gpib_board_t *board);
-int iboffline(gpib_board_t *board);
-int iblines(const gpib_board_t *board, short *lines);
-int ibrd(gpib_board_t *board, uint8_t *buf, size_t length, int *end_flag, size_t *bytes_read);
-int ibrpp(gpib_board_t *board, uint8_t *buf);
-int ibrsv2(gpib_board_t *board, uint8_t status_byte, int new_reason_for_service);
-void ibrsc(gpib_board_t *board, int request_control);
-int ibsic(gpib_board_t *board, unsigned int usec_duration);
-int ibsre(gpib_board_t *board, int enable);
-int ibpad(gpib_board_t *board, unsigned int addr);
-int ibsad(gpib_board_t *board, int addr);
-int ibeos(gpib_board_t *board, int eos, int eosflags);
-int ibwait(gpib_board_t *board, int wait_mask, int clear_mask, int set_mask,
+int ibAPWait(struct gpib_board *board, int pad);
+int ibAPrsp(struct gpib_board *board, int padsad, char *spb);
+void ibAPE(struct gpib_board *board, int pad, int v);
+int ibcac(struct gpib_board *board, int sync, int fallback_to_async);
+int ibcmd(struct gpib_board *board, uint8_t *buf, size_t length, size_t *bytes_written);
+int ibgts(struct gpib_board *board);
+int ibonline(struct gpib_board *board);
+int iboffline(struct gpib_board *board);
+int iblines(const struct gpib_board *board, short *lines);
+int ibrd(struct gpib_board *board, uint8_t *buf, size_t length, int *end_flag, size_t *bytes_read);
+int ibrpp(struct gpib_board *board, uint8_t *buf);
+int ibrsv2(struct gpib_board *board, uint8_t status_byte, int new_reason_for_service);
+void ibrsc(struct gpib_board *board, int request_control);
+int ibsic(struct gpib_board *board, unsigned int usec_duration);
+int ibsre(struct gpib_board *board, int enable);
+int ibpad(struct gpib_board *board, unsigned int addr);
+int ibsad(struct gpib_board *board, int addr);
+int ibeos(struct gpib_board *board, int eos, int eosflags);
+int ibwait(struct gpib_board *board, int wait_mask, int clear_mask, int set_mask,
int *status, unsigned long usec_timeout, gpib_descriptor_t *desc);
-int ibwrt(gpib_board_t *board, uint8_t *buf, size_t cnt, int send_eoi, size_t *bytes_written);
-int ibstatus(gpib_board_t *board);
-int general_ibstatus(gpib_board_t *board, const gpib_status_queue_t *device,
+int ibwrt(struct gpib_board *board, uint8_t *buf, size_t cnt, int send_eoi, size_t *bytes_written);
+int ibstatus(struct gpib_board *board);
+int general_ibstatus(struct gpib_board *board, const gpib_status_queue_t *device,
int clear_mask, int set_mask, gpib_descriptor_t *desc);
-int io_timed_out(gpib_board_t *board);
-int ibppc(gpib_board_t *board, uint8_t configuration);
+int io_timed_out(struct gpib_board *board);
+int ibppc(struct gpib_board *board, uint8_t configuration);
#endif /* GPIB_PROTO_INCLUDED */
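
The prototype churn in gpibP.h and gpib_proto.h all follows from one change in gpib_types.h just below: the gpib_board_t typedef is dropped in favour of the plain struct tag, which kernel coding style prefers, and a forward declaration is enough for any header that only passes pointers around. Schematically (the example_attach_* names are invented for illustration):

	/* before: a typedef hides the struct tag */
	typedef struct gpib_board_struct gpib_board_t;
	int example_attach_old(gpib_board_t *board);

	/* after: plain struct tag, forward-declared where only pointers are used */
	struct gpib_board;
	int example_attach_new(struct gpib_board *board);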
diff --git a/drivers/staging/gpib/include/gpib_types.h b/drivers/staging/gpib/include/gpib_types.h
index b41781a55a60..2d9b9be683f8 100644
--- a/drivers/staging/gpib/include/gpib_types.h
+++ b/drivers/staging/gpib/include/gpib_types.h
@@ -23,7 +23,7 @@
#include <linux/interrupt.h>
typedef struct gpib_interface_struct gpib_interface_t;
-typedef struct gpib_board_struct gpib_board_t;
+struct gpib_board;
/* config parameters that are only used by driver attach functions */
typedef struct {
@@ -55,9 +55,9 @@ struct gpib_interface_struct {
/* name of board */
char *name;
/* attach() initializes board and allocates resources */
- int (*attach)(gpib_board_t *board, const gpib_board_config_t *config);
+ int (*attach)(struct gpib_board *board, const gpib_board_config_t *config);
/* detach() shuts down board and frees resources */
- void (*detach)(gpib_board_t *board);
+ void (*detach)(struct gpib_board *board);
/* read() should read at most 'length' bytes from the bus into
* 'buffer'. It should return when it fills the buffer or
* encounters an END (EOI and or EOS if appropriate). It should set 'end'
@@ -68,19 +68,19 @@ struct gpib_interface_struct {
* return indicates error.
* nbytes returns number of bytes read
*/
- int (*read)(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
+ int (*read)(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
size_t *bytes_read);
/* write() should write 'length' bytes from buffer to the bus.
* If the boolean value send_eoi is nonzero, then EOI should
* be sent along with the last byte. Returns number of bytes
* written or negative value on error.
*/
- int (*write)(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
+ int (*write)(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
size_t *bytes_written);
/* command() writes the command bytes in 'buffer' to the bus
* Returns zero on success or negative value on error.
*/
- int (*command)(gpib_board_t *board, uint8_t *buffer, size_t length,
+ int (*command)(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written);
/* Take control (assert ATN). If 'asyncronous' is nonzero, take
* control asyncronously (assert ATN immediately without waiting
@@ -88,54 +88,54 @@ struct gpib_interface_struct {
* until board becomes controller in charge. Returns zero no success,
* nonzero on error.
*/
- int (*take_control)(gpib_board_t *board, int asyncronous);
+ int (*take_control)(struct gpib_board *board, int asyncronous);
/* De-assert ATN. Returns zero on success, nonzer on error.
*/
- int (*go_to_standby)(gpib_board_t *board);
+ int (*go_to_standby)(struct gpib_board *board);
/* request/release control of the IFC and REN lines (system controller) */
- void (*request_system_control)(gpib_board_t *board, int request_control);
+ void (*request_system_control)(struct gpib_board *board, int request_control);
/* Asserts or de-asserts 'interface clear' (IFC) depending on
* boolean value of 'assert'
*/
- void (*interface_clear)(gpib_board_t *board, int assert);
+ void (*interface_clear)(struct gpib_board *board, int assert);
/* Sends remote enable command if 'enable' is nonzero, disables remote mode
* if 'enable' is zero
*/
- void (*remote_enable)(gpib_board_t *board, int enable);
+ void (*remote_enable)(struct gpib_board *board, int enable);
/* enable END for reads, when byte 'eos' is received. If
* 'compare_8_bits' is nonzero, then all 8 bits are compared
* with the eos bytes. Otherwise only the 7 least significant
* bits are compared.
*/
- int (*enable_eos)(gpib_board_t *board, uint8_t eos, int compare_8_bits);
+ int (*enable_eos)(struct gpib_board *board, uint8_t eos, int compare_8_bits);
/* disable END on eos byte (END on EOI only)*/
- void (*disable_eos)(gpib_board_t *board);
+ void (*disable_eos)(struct gpib_board *board);
/* configure parallel poll */
- void (*parallel_poll_configure)(gpib_board_t *board, uint8_t configuration);
+ void (*parallel_poll_configure)(struct gpib_board *board, uint8_t configuration);
/* conduct parallel poll */
- int (*parallel_poll)(gpib_board_t *board, uint8_t *result);
+ int (*parallel_poll)(struct gpib_board *board, uint8_t *result);
/* set/clear ist (individual status bit) */
- void (*parallel_poll_response)(gpib_board_t *board, int ist);
+ void (*parallel_poll_response)(struct gpib_board *board, int ist);
/* select local parallel poll configuration mode PP2 versus remote PP1 */
- void (*local_parallel_poll_mode)(gpib_board_t *board, int local);
+ void (*local_parallel_poll_mode)(struct gpib_board *board, int local);
/* Returns current status of the bus lines. Should be set to
* NULL if your board does not have the ability to query the
* state of the bus lines.
*/
- int (*line_status)(const gpib_board_t *board);
+ int (*line_status)(const struct gpib_board *board);
/* updates and returns the board's current status.
* The meaning of the bits are specified in gpib_user.h
* in the IBSTA section. The driver does not need to
* worry about setting the CMPL, END, TIMO, or ERR bits.
*/
- unsigned int (*update_status)(gpib_board_t *board, unsigned int clear_mask);
+ unsigned int (*update_status)(struct gpib_board *board, unsigned int clear_mask);
/* Sets primary address 0-30 for gpib interface card.
*/
- int (*primary_address)(gpib_board_t *board, unsigned int address);
+ int (*primary_address)(struct gpib_board *board, unsigned int address);
/* Sets and enables, or disables secondary address 0-30
* for gpib interface card.
*/
- int (*secondary_address)(gpib_board_t *board, unsigned int address,
+ int (*secondary_address)(struct gpib_board *board, unsigned int address,
int enable);
/* Sets the byte the board should send in response to a serial poll.
* This function should also start or stop requests for service via
@@ -149,7 +149,7 @@ struct gpib_interface_struct {
* by IEEE 488.2 section 11.3.3.4.3 "Allowed Coupled Control of
* STB, reqt, and reqf".
*/
- void (*serial_poll_response)(gpib_board_t *board, uint8_t status_byte);
+ void (*serial_poll_response)(struct gpib_board *board, uint8_t status_byte);
/* Sets the byte the board should send in response to a serial poll.
* This function should also request service via IEEE 488.2 reqt/reqf
* based on MSS (bit 6 of the status_byte) and new_reason_for_service.
@@ -164,15 +164,15 @@ struct gpib_interface_struct {
* If this method is left NULL by the driver, then the user library
* function ibrsv2 will not work.
*/
- void (*serial_poll_response2)(gpib_board_t *board, uint8_t status_byte,
+ void (*serial_poll_response2)(struct gpib_board *board, uint8_t status_byte,
int new_reason_for_service);
/* returns the byte the board will send in response to a serial poll.
*/
- uint8_t (*serial_poll_status)(gpib_board_t *board);
+ uint8_t (*serial_poll_status)(struct gpib_board *board);
/* adjust T1 delay */
- unsigned int (*t1_delay)(gpib_board_t *board, unsigned int nano_sec);
+ int (*t1_delay)(struct gpib_board *board, unsigned int nano_sec);
/* go to local mode */
- void (*return_to_local)(gpib_board_t *board);
+ void (*return_to_local)(struct gpib_board *board);
/* board does not support 7 bit eos comparisons */
unsigned no_7_bit_eos : 1;
/* skip check for listeners before trying to send command bytes */
@@ -198,7 +198,7 @@ static inline void init_event_queue(gpib_event_queue_t *queue)
struct gpib_pseudo_irq {
struct timer_list timer;
irqreturn_t (*handler)(int irq, void *arg);
- gpib_board_t *board;
+ struct gpib_board *board;
atomic_t active;
};
@@ -216,11 +216,11 @@ typedef struct gpib_interface_list_struct {
struct module *module;
} gpib_interface_list_t;
-/* One gpib_board_t is allocated for each physical board in the computer.
+/* One struct gpib_board is allocated for each physical board in the computer.
* It provides storage for variables local to each board, and interface
* functions for performing operations on the board
*/
-struct gpib_board_struct {
+struct gpib_board {
/* functions used by this board */
gpib_interface_t *interface;
/* Pointer to module whose use count we should increment when
diff --git a/drivers/staging/gpib/include/nec7210.h b/drivers/staging/gpib/include/nec7210.h
index ca998c4a84bf..069896456230 100644
--- a/drivers/staging/gpib/include/nec7210.h
+++ b/drivers/staging/gpib/include/nec7210.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+//* SPDX-License-Identifier: GPL-2.0 */
/***************************************************************************
* copyright : (C) 2002 by Frank Mori Hess
@@ -78,48 +78,48 @@ enum {
};
// interface functions
-int nec7210_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read);
-int nec7210_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, int send_eoi, size_t *bytes_written);
-int nec7210_command(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_command(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, size_t *bytes_written);
-int nec7210_take_control(gpib_board_t *board, struct nec7210_priv *priv, int syncronous);
-int nec7210_go_to_standby(gpib_board_t *board, struct nec7210_priv *priv);
-void nec7210_request_system_control(gpib_board_t *board,
+int nec7210_take_control(struct gpib_board *board, struct nec7210_priv *priv, int syncronous);
+int nec7210_go_to_standby(struct gpib_board *board, struct nec7210_priv *priv);
+void nec7210_request_system_control(struct gpib_board *board,
struct nec7210_priv *priv, int request_control);
-void nec7210_interface_clear(gpib_board_t *board, struct nec7210_priv *priv, int assert);
-void nec7210_remote_enable(gpib_board_t *board, struct nec7210_priv *priv, int enable);
-int nec7210_enable_eos(gpib_board_t *board, struct nec7210_priv *priv, uint8_t eos_bytes,
+void nec7210_interface_clear(struct gpib_board *board, struct nec7210_priv *priv, int assert);
+void nec7210_remote_enable(struct gpib_board *board, struct nec7210_priv *priv, int enable);
+int nec7210_enable_eos(struct gpib_board *board, struct nec7210_priv *priv, uint8_t eos_bytes,
int compare_8_bits);
-void nec7210_disable_eos(gpib_board_t *board, struct nec7210_priv *priv);
-unsigned int nec7210_update_status(gpib_board_t *board, struct nec7210_priv *priv,
+void nec7210_disable_eos(struct gpib_board *board, struct nec7210_priv *priv);
+unsigned int nec7210_update_status(struct gpib_board *board, struct nec7210_priv *priv,
unsigned int clear_mask);
-unsigned int nec7210_update_status_nolock(gpib_board_t *board, struct nec7210_priv *priv);
-int nec7210_primary_address(const gpib_board_t *board,
+unsigned int nec7210_update_status_nolock(struct gpib_board *board, struct nec7210_priv *priv);
+int nec7210_primary_address(const struct gpib_board *board,
struct nec7210_priv *priv, unsigned int address);
-int nec7210_secondary_address(const gpib_board_t *board, struct nec7210_priv *priv,
+int nec7210_secondary_address(const struct gpib_board *board, struct nec7210_priv *priv,
unsigned int address, int enable);
-int nec7210_parallel_poll(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *result);
-void nec7210_serial_poll_response(gpib_board_t *board, struct nec7210_priv *priv, uint8_t status);
-void nec7210_parallel_poll_configure(gpib_board_t *board,
+int nec7210_parallel_poll(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *result);
+void nec7210_serial_poll_response(struct gpib_board *board, struct nec7210_priv *priv, uint8_t status);
+void nec7210_parallel_poll_configure(struct gpib_board *board,
struct nec7210_priv *priv, unsigned int configuration);
-void nec7210_parallel_poll_response(gpib_board_t *board,
+void nec7210_parallel_poll_response(struct gpib_board *board,
struct nec7210_priv *priv, int ist);
-uint8_t nec7210_serial_poll_status(gpib_board_t *board,
+uint8_t nec7210_serial_poll_status(struct gpib_board *board,
struct nec7210_priv *priv);
-unsigned int nec7210_t1_delay(gpib_board_t *board,
- struct nec7210_priv *priv, unsigned int nano_sec);
-void nec7210_return_to_local(const gpib_board_t *board, struct nec7210_priv *priv);
+int nec7210_t1_delay(struct gpib_board *board,
+ struct nec7210_priv *priv, unsigned int nano_sec);
+void nec7210_return_to_local(const struct gpib_board *board, struct nec7210_priv *priv);
// utility functions
-void nec7210_board_reset(struct nec7210_priv *priv, const gpib_board_t *board);
-void nec7210_board_online(struct nec7210_priv *priv, const gpib_board_t *board);
+void nec7210_board_reset(struct nec7210_priv *priv, const struct gpib_board *board);
+void nec7210_board_online(struct nec7210_priv *priv, const struct gpib_board *board);
unsigned int nec7210_set_reg_bits(struct nec7210_priv *priv, unsigned int reg,
unsigned int mask, unsigned int bits);
-void nec7210_set_handshake_mode(gpib_board_t *board, struct nec7210_priv *priv, int mode);
-void nec7210_release_rfd_holdoff(gpib_board_t *board, struct nec7210_priv *priv);
-uint8_t nec7210_read_data_in(gpib_board_t *board, struct nec7210_priv *priv, int *end);
+void nec7210_set_handshake_mode(struct gpib_board *board, struct nec7210_priv *priv, int mode);
+void nec7210_release_rfd_holdoff(struct gpib_board *board, struct nec7210_priv *priv);
+uint8_t nec7210_read_data_in(struct gpib_board *board, struct nec7210_priv *priv, int *end);
// wrappers for io functions
uint8_t nec7210_ioport_read_byte(struct nec7210_priv *priv, unsigned int register_num);
@@ -134,8 +134,8 @@ void nec7210_locking_iomem_write_byte(struct nec7210_priv *priv, uint8_t data,
unsigned int register_num);
// interrupt service routine
-irqreturn_t nec7210_interrupt(gpib_board_t *board, struct nec7210_priv *priv);
-irqreturn_t nec7210_interrupt_have_status(gpib_board_t *board,
+irqreturn_t nec7210_interrupt(struct gpib_board *board, struct nec7210_priv *priv);
+irqreturn_t nec7210_interrupt_have_status(struct gpib_board *board,
struct nec7210_priv *priv, int status1, int status2);
#endif //_NEC7210_H
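
nec7210_t1_delay(), together with the t1_delay hook in gpib_interface_struct and the ines/hp_82341 implementations, now returns int instead of unsigned int, so an implementation can report a negative errno rather than having to fake a delay value on failure. A sketch of what a conforming implementation might look like; the numbers are placeholders, not the chip's real limits:

	#include <linux/errno.h>

	static int example_t1_delay(unsigned int nano_sec)
	{
		if (nano_sec <= 500)
			return 500;	/* shortest delay the hardware supports, in ns */
		if (nano_sec <= 2000)
			return 2000;	/* next supported step that covers the request */
		return -EINVAL;		/* longer delays cannot be programmed */
	}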
diff --git a/drivers/staging/gpib/include/tms9914.h b/drivers/staging/gpib/include/tms9914.h
index d8c8d1c9b131..424c95ad85c6 100644
--- a/drivers/staging/gpib/include/tms9914.h
+++ b/drivers/staging/gpib/include/tms9914.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+//* SPDX-License-Identifier: GPL-2.0 */
/***************************************************************************
* copyright : (C) 2002 by Frank Mori Hess
@@ -79,47 +79,47 @@ enum {
};
// interface functions
-int tms9914_read(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_read(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read);
-int tms9914_write(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, int send_eoi, size_t *bytes_written);
-int tms9914_command(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_command(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, size_t *bytes_written);
-int tms9914_take_control(gpib_board_t *board, struct tms9914_priv *priv, int syncronous);
+int tms9914_take_control(struct gpib_board *board, struct tms9914_priv *priv, int syncronous);
/* alternate version of tms9914_take_control which works around buggy tcs
* implementation.
*/
-int tms9914_take_control_workaround(gpib_board_t *board, struct tms9914_priv *priv,
+int tms9914_take_control_workaround(struct gpib_board *board, struct tms9914_priv *priv,
int syncronous);
-int tms9914_go_to_standby(gpib_board_t *board, struct tms9914_priv *priv);
-void tms9914_request_system_control(gpib_board_t *board, struct tms9914_priv *priv,
+int tms9914_go_to_standby(struct gpib_board *board, struct tms9914_priv *priv);
+void tms9914_request_system_control(struct gpib_board *board, struct tms9914_priv *priv,
int request_control);
-void tms9914_interface_clear(gpib_board_t *board, struct tms9914_priv *priv, int assert);
-void tms9914_remote_enable(gpib_board_t *board, struct tms9914_priv *priv, int enable);
-int tms9914_enable_eos(gpib_board_t *board, struct tms9914_priv *priv, uint8_t eos_bytes,
+void tms9914_interface_clear(struct gpib_board *board, struct tms9914_priv *priv, int assert);
+void tms9914_remote_enable(struct gpib_board *board, struct tms9914_priv *priv, int enable);
+int tms9914_enable_eos(struct gpib_board *board, struct tms9914_priv *priv, uint8_t eos_bytes,
int compare_8_bits);
-void tms9914_disable_eos(gpib_board_t *board, struct tms9914_priv *priv);
-unsigned int tms9914_update_status(gpib_board_t *board, struct tms9914_priv *priv,
+void tms9914_disable_eos(struct gpib_board *board, struct tms9914_priv *priv);
+unsigned int tms9914_update_status(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int clear_mask);
-int tms9914_primary_address(gpib_board_t *board,
+int tms9914_primary_address(struct gpib_board *board,
struct tms9914_priv *priv, unsigned int address);
-int tms9914_secondary_address(gpib_board_t *board, struct tms9914_priv *priv,
+int tms9914_secondary_address(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int address, int enable);
-int tms9914_parallel_poll(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *result);
-void tms9914_parallel_poll_configure(gpib_board_t *board,
+int tms9914_parallel_poll(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *result);
+void tms9914_parallel_poll_configure(struct gpib_board *board,
struct tms9914_priv *priv, uint8_t config);
-void tms9914_parallel_poll_response(gpib_board_t *board,
+void tms9914_parallel_poll_response(struct gpib_board *board,
struct tms9914_priv *priv, int ist);
-void tms9914_serial_poll_response(gpib_board_t *board, struct tms9914_priv *priv, uint8_t status);
-uint8_t tms9914_serial_poll_status(gpib_board_t *board, struct tms9914_priv *priv);
-int tms9914_line_status(const gpib_board_t *board, struct tms9914_priv *priv);
-unsigned int tms9914_t1_delay(gpib_board_t *board, struct tms9914_priv *priv,
+void tms9914_serial_poll_response(struct gpib_board *board, struct tms9914_priv *priv, uint8_t status);
+uint8_t tms9914_serial_poll_status(struct gpib_board *board, struct tms9914_priv *priv);
+int tms9914_line_status(const struct gpib_board *board, struct tms9914_priv *priv);
+unsigned int tms9914_t1_delay(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int nano_sec);
-void tms9914_return_to_local(const gpib_board_t *board, struct tms9914_priv *priv);
+void tms9914_return_to_local(const struct gpib_board *board, struct tms9914_priv *priv);
// utility functions
void tms9914_board_reset(struct tms9914_priv *priv);
-void tms9914_online(gpib_board_t *board, struct tms9914_priv *priv);
+void tms9914_online(struct gpib_board *board, struct tms9914_priv *priv);
void tms9914_release_holdoff(struct tms9914_priv *priv);
void tms9914_set_holdoff_mode(struct tms9914_priv *priv, enum tms9914_holdoff_mode mode);
@@ -130,8 +130,8 @@ uint8_t tms9914_iomem_read_byte(struct tms9914_priv *priv, unsigned int register
void tms9914_iomem_write_byte(struct tms9914_priv *priv, uint8_t data, unsigned int register_num);
// interrupt service routine
-irqreturn_t tms9914_interrupt(gpib_board_t *board, struct tms9914_priv *priv);
-irqreturn_t tms9914_interrupt_have_status(gpib_board_t *board, struct tms9914_priv *priv,
+irqreturn_t tms9914_interrupt(struct gpib_board *board, struct tms9914_priv *priv);
+irqreturn_t tms9914_interrupt_have_status(struct gpib_board *board, struct tms9914_priv *priv,
int status1, int status2);
// tms9914 has 8 registers
diff --git a/drivers/staging/gpib/ines/Makefile b/drivers/staging/gpib/ines/Makefile
index 6b6e480fd811..88241f15ecea 100644
--- a/drivers/staging/gpib/ines/Makefile
+++ b/drivers/staging/gpib/ines/Makefile
@@ -1,4 +1,3 @@
-ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
obj-$(CONFIG_GPIB_INES) += ines_gpib.o
diff --git a/drivers/staging/gpib/ines/ines.h b/drivers/staging/gpib/ines/ines.h
index 3918737fa21a..ff27f055a0ff 100644
--- a/drivers/staging/gpib/ines/ines.h
+++ b/drivers/staging/gpib/ines/ines.h
@@ -36,41 +36,41 @@ struct ines_priv {
};
// interface functions
-int ines_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
-int ines_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+int ines_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read);
+int ines_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written);
-int ines_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+int ines_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read);
-int ines_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+int ines_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written);
-int ines_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written);
-int ines_take_control(gpib_board_t *board, int synchronous);
-int ines_go_to_standby(gpib_board_t *board);
-void ines_request_system_control(gpib_board_t *board, int request_control);
-void ines_interface_clear(gpib_board_t *board, int assert);
-void ines_remote_enable(gpib_board_t *board, int enable);
-int ines_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits);
-void ines_disable_eos(gpib_board_t *board);
-unsigned int ines_update_status(gpib_board_t *board, unsigned int clear_mask);
-int ines_primary_address(gpib_board_t *board, unsigned int address);
-int ines_secondary_address(gpib_board_t *board, unsigned int address, int enable);
-int ines_parallel_poll(gpib_board_t *board, uint8_t *result);
-void ines_parallel_poll_configure(gpib_board_t *board, uint8_t config);
-void ines_parallel_poll_response(gpib_board_t *board, int ist);
-void ines_serial_poll_response(gpib_board_t *board, uint8_t status);
-uint8_t ines_serial_poll_status(gpib_board_t *board);
-int ines_line_status(const gpib_board_t *board);
-unsigned int ines_t1_delay(gpib_board_t *board, unsigned int nano_sec);
-void ines_return_to_local(gpib_board_t *board);
+int ines_command(struct gpib_board *board, uint8_t *buffer, size_t length, size_t *bytes_written);
+int ines_take_control(struct gpib_board *board, int synchronous);
+int ines_go_to_standby(struct gpib_board *board);
+void ines_request_system_control(struct gpib_board *board, int request_control);
+void ines_interface_clear(struct gpib_board *board, int assert);
+void ines_remote_enable(struct gpib_board *board, int enable);
+int ines_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits);
+void ines_disable_eos(struct gpib_board *board);
+unsigned int ines_update_status(struct gpib_board *board, unsigned int clear_mask);
+int ines_primary_address(struct gpib_board *board, unsigned int address);
+int ines_secondary_address(struct gpib_board *board, unsigned int address, int enable);
+int ines_parallel_poll(struct gpib_board *board, uint8_t *result);
+void ines_parallel_poll_configure(struct gpib_board *board, uint8_t config);
+void ines_parallel_poll_response(struct gpib_board *board, int ist);
+void ines_serial_poll_response(struct gpib_board *board, uint8_t status);
+uint8_t ines_serial_poll_status(struct gpib_board *board);
+int ines_line_status(const struct gpib_board *board);
+int ines_t1_delay(struct gpib_board *board, unsigned int nano_sec);
+void ines_return_to_local(struct gpib_board *board);
// interrupt service routines
irqreturn_t ines_pci_interrupt(int irq, void *arg);
-irqreturn_t ines_interrupt(gpib_board_t *board);
+irqreturn_t ines_interrupt(struct gpib_board *board);
// utility functions
-void ines_free_private(gpib_board_t *board);
-int ines_generic_attach(gpib_board_t *board);
-void ines_online(struct ines_priv *priv, const gpib_board_t *board, int use_accel);
+void ines_free_private(struct gpib_board *board);
+int ines_generic_attach(struct gpib_board *board);
+void ines_online(struct ines_priv *priv, const struct gpib_board *board, int use_accel);
void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count);
/* inb/outb wrappers */
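
The ines_gpib.c hunk that follows renames the CamelCase line-status constants (BusREN, BusNRFD, ...) to the upper-case BUS_* names and drops an unused nec7210_priv local; the translation logic is unchanged, each bus-control-monitor bit is tested and the matching BUS_* flag OR'd into the returned mask, as in this reduced sketch (register and bit values invented for the example):

	#include <linux/bits.h>

	/* stand-ins for the real monitor-register and status-mask definitions */
	#define EX_BCM_REN_BIT	BIT(0)
	#define EX_BCM_SRQ_BIT	BIT(1)
	#define EX_BUS_REN	BIT(8)
	#define EX_BUS_SRQ	BIT(9)
	#define EX_VALID_ALL	BIT(15)

	static int example_line_status(unsigned int bcm_bits)
	{
		int status = EX_VALID_ALL;	/* mark every reported line as valid */

		if (bcm_bits & EX_BCM_REN_BIT)
			status |= EX_BUS_REN;
		if (bcm_bits & EX_BCM_SRQ_BIT)
			status |= EX_BUS_SRQ;

		return status;
	}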
diff --git a/drivers/staging/gpib/ines/ines_gpib.c b/drivers/staging/gpib/ines/ines_gpib.c
index 22a05a287bce..d93eb05dab90 100644
--- a/drivers/staging/gpib/ines/ines_gpib.c
+++ b/drivers/staging/gpib/ines/ines_gpib.c
@@ -5,6 +5,10 @@
* (C) 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include "ines.h"
#include <linux/pci.h>
@@ -21,34 +25,32 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for Ines iGPIB 72010");
-int ines_line_status(const gpib_board_t *board)
+int ines_line_status(const struct gpib_board *board)
{
- int status = ValidALL;
+ int status = VALID_ALL;
int bcm_bits;
struct ines_priv *ines_priv;
- struct nec7210_priv *nec_priv;
ines_priv = board->private_data;
- nec_priv = &ines_priv->nec7210_priv;
bcm_bits = ines_inb(ines_priv, BUS_CONTROL_MONITOR);
if (bcm_bits & BCM_REN_BIT)
- status |= BusREN;
+ status |= BUS_REN;
if (bcm_bits & BCM_IFC_BIT)
- status |= BusIFC;
+ status |= BUS_IFC;
if (bcm_bits & BCM_SRQ_BIT)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if (bcm_bits & BCM_EOI_BIT)
- status |= BusEOI;
+ status |= BUS_EOI;
if (bcm_bits & BCM_NRFD_BIT)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if (bcm_bits & BCM_NDAC_BIT)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if (bcm_bits & BCM_DAV_BIT)
- status |= BusDAV;
+ status |= BUS_DAV;
if (bcm_bits & BCM_ATN_BIT)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
@@ -56,14 +58,14 @@ int ines_line_status(const gpib_board_t *board)
void ines_set_xfer_counter(struct ines_priv *priv, unsigned int count)
{
if (count > 0xffff) {
- pr_err("ines: bug! tried to set xfer counter > 0xffff\n");
+ pr_err("bug! tried to set xfer counter > 0xffff\n");
return;
}
ines_outb(priv, (count >> 8) & 0xff, XFER_COUNT_UPPER);
ines_outb(priv, count & 0xff, XFER_COUNT_LOWER);
}
-unsigned int ines_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+int ines_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv = &ines_priv->nec7210_priv;
@@ -93,7 +95,7 @@ static inline unsigned short num_in_fifo_bytes(struct ines_priv *ines_priv)
return ines_inb(ines_priv, IN_FIFO_COUNT);
}
-static ssize_t pio_read(gpib_board_t *board, struct ines_priv *ines_priv, uint8_t *buffer,
+static ssize_t pio_read(struct gpib_board *board, struct ines_priv *ines_priv, uint8_t *buffer,
size_t length, size_t *nbytes)
{
ssize_t retval = 0;
@@ -106,21 +108,18 @@ static ssize_t pio_read(gpib_board_t *board, struct ines_priv *ines_priv, uint8_
num_in_fifo_bytes(ines_priv) ||
test_bit(RECEIVED_END_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- pr_warn("gpib: pio read wait interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
return -EINTR;
num_fifo_bytes = num_in_fifo_bytes(ines_priv);
- if (num_fifo_bytes + *nbytes > length) {
- pr_warn("ines: counter allowed %li extra byte(s)\n",
- (long)(num_fifo_bytes - (length - *nbytes)));
+ if (num_fifo_bytes + *nbytes > length)
num_fifo_bytes = length - *nbytes;
- }
+
for (i = 0; i < num_fifo_bytes; i++)
buffer[(*nbytes)++] = read_byte(nec_priv, DIR);
if (test_bit(RECEIVED_END_BN, &nec_priv->state) &&
@@ -134,7 +133,7 @@ static ssize_t pio_read(gpib_board_t *board, struct ines_priv *ines_priv, uint8_
return retval;
}
-int ines_accel_read(gpib_board_t *board, uint8_t *buffer,
+int ines_accel_read(struct gpib_board *board, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -191,7 +190,7 @@ static inline unsigned short num_out_fifo_bytes(struct ines_priv *ines_priv)
return ines_inb(ines_priv, OUT_FIFO_COUNT);
}
-static int ines_write_wait(gpib_board_t *board, struct ines_priv *ines_priv,
+static int ines_write_wait(struct gpib_board *board, struct ines_priv *ines_priv,
unsigned int fifo_threshold)
{
struct nec7210_priv *nec_priv = &ines_priv->nec7210_priv;
@@ -201,10 +200,9 @@ static int ines_write_wait(gpib_board_t *board, struct ines_priv *ines_priv,
num_out_fifo_bytes(ines_priv) < fifo_threshold ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
+
if (test_bit(BUS_ERROR_BN, &nec_priv->state))
return -EIO;
if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
@@ -215,7 +213,7 @@ static int ines_write_wait(gpib_board_t *board, struct ines_priv *ines_priv,
return 0;
}
-int ines_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+int ines_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
size_t count = 0;
@@ -268,7 +266,7 @@ int ines_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
irqreturn_t ines_pci_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct ines_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -283,7 +281,7 @@ irqreturn_t ines_pci_interrupt(int irq, void *arg)
return ines_interrupt(board);
}
-irqreturn_t ines_interrupt(gpib_board_t *board)
+irqreturn_t ines_interrupt(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -301,7 +299,7 @@ irqreturn_t ines_interrupt(gpib_board_t *board)
wake++;
}
if (isr3_bits & FIFO_ERROR_BIT)
- pr_err("ines gpib: fifo error\n");
+ dev_err(board->gpib_dev, "fifo error\n");
if (isr3_bits & XFER_COUNT_BIT)
wake++;
@@ -315,12 +313,12 @@ irqreturn_t ines_interrupt(gpib_board_t *board)
return IRQ_HANDLED;
}
-static int ines_pci_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ines_pci_accel_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config);
+static int ines_pci_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int ines_pci_accel_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int ines_isa_attach(struct gpib_board *board, const gpib_board_config_t *config);
-static void ines_pci_detach(gpib_board_t *board);
-static void ines_isa_detach(gpib_board_t *board);
+static void ines_pci_detach(struct gpib_board *board);
+static void ines_isa_detach(struct gpib_board *board);
enum ines_pci_vendor_ids {
PCI_VENDOR_ID_INES_QUICKLOGIC = 0x16da
@@ -395,7 +393,8 @@ static struct ines_pci_id pci_ids[] = {
static const int num_pci_chips = ARRAY_SIZE(pci_ids);
// wrappers for interface functions
-int ines_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+int ines_read(struct gpib_board *board, uint8_t *buffer, size_t length,
+ int *end, size_t *bytes_read)
{
struct ines_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -413,7 +412,7 @@ int ines_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, siz
return retval;
}
-int ines_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
+int ines_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
size_t *bytes_written)
{
struct ines_priv *priv = board->private_data;
@@ -421,119 +420,119 @@ int ines_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-int ines_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+int ines_command(struct gpib_board *board, uint8_t *buffer, size_t length, size_t *bytes_written)
{
struct ines_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-int ines_take_control(gpib_board_t *board, int synchronous)
+int ines_take_control(struct gpib_board *board, int synchronous)
{
struct ines_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-int ines_go_to_standby(gpib_board_t *board)
+int ines_go_to_standby(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-void ines_request_system_control(gpib_board_t *board, int request_control)
+void ines_request_system_control(struct gpib_board *board, int request_control)
{
struct ines_priv *priv = board->private_data;
nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
}
-void ines_interface_clear(gpib_board_t *board, int assert)
+void ines_interface_clear(struct gpib_board *board, int assert)
{
struct ines_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-void ines_remote_enable(gpib_board_t *board, int enable)
+void ines_remote_enable(struct gpib_board *board, int enable)
{
struct ines_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-int ines_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+int ines_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct ines_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-void ines_disable_eos(gpib_board_t *board)
+void ines_disable_eos(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-unsigned int ines_update_status(gpib_board_t *board, unsigned int clear_mask)
+unsigned int ines_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct ines_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-int ines_primary_address(gpib_board_t *board, unsigned int address)
+int ines_primary_address(struct gpib_board *board, unsigned int address)
{
struct ines_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-int ines_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+int ines_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct ines_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-int ines_parallel_poll(gpib_board_t *board, uint8_t *result)
+int ines_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct ines_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-void ines_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+void ines_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct ines_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, config);
}
-void ines_parallel_poll_response(gpib_board_t *board, int ist)
+void ines_parallel_poll_response(struct gpib_board *board, int ist)
{
struct ines_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-void ines_serial_poll_response(gpib_board_t *board, uint8_t status)
+void ines_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct ines_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-uint8_t ines_serial_poll_status(gpib_board_t *board)
+uint8_t ines_serial_poll_status(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-void ines_return_to_local(gpib_board_t *board)
+void ines_return_to_local(struct gpib_board *board)
{
struct ines_priv *priv = board->private_data;
@@ -652,7 +651,7 @@ static gpib_interface_t ines_isa_interface = {
.return_to_local = ines_return_to_local,
};
-static int ines_allocate_private(gpib_board_t *board)
+static int ines_allocate_private(struct gpib_board *board)
{
struct ines_priv *priv;
@@ -665,13 +664,13 @@ static int ines_allocate_private(gpib_board_t *board)
return 0;
}
-void ines_free_private(gpib_board_t *board)
+void ines_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-int ines_generic_attach(gpib_board_t *board)
+int ines_generic_attach(struct gpib_board *board)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
@@ -691,7 +690,7 @@ int ines_generic_attach(gpib_board_t *board)
return 0;
}
-void ines_online(struct ines_priv *ines_priv, const gpib_board_t *board, int use_accel)
+void ines_online(struct ines_priv *ines_priv, const struct gpib_board *board, int use_accel)
{
struct nec7210_priv *nec_priv = &ines_priv->nec7210_priv;
@@ -725,7 +724,7 @@ void ines_online(struct ines_priv *ines_priv, const gpib_board_t *board, int use
nec7210_set_reg_bits(nec_priv, IMR1, HR_DOIE | HR_DIIE, 0);
}
-static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ines_common_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
@@ -769,16 +768,16 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
} while (1);
}
if (!ines_priv->pci_device) {
- pr_err("gpib: could not find ines PCI board\n");
+ dev_err(board->gpib_dev, "could not find ines PCI board\n");
return -1;
}
if (pci_enable_device(ines_priv->pci_device)) {
- pr_err("error enabling pci device\n");
+ dev_err(board->gpib_dev, "error enabling pci device\n");
return -1;
}
- if (pci_request_regions(ines_priv->pci_device, "ines-gpib"))
+ if (pci_request_regions(ines_priv->pci_device, DRV_NAME))
return -1;
nec_priv->iobase = pci_resource_start(ines_priv->pci_device,
found_id.gpib_region);
@@ -797,7 +796,7 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
case PCI_CHIP_QUICKLOGIC5030:
break;
default:
- pr_err("gpib: unspecified chip type? (bug)\n");
+ dev_err(board->gpib_dev, "unspecified chip type? (bug)\n");
nec_priv->iobase = 0;
pci_release_regions(ines_priv->pci_device);
return -1;
@@ -813,8 +812,8 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
#endif
isr_flags |= IRQF_SHARED;
if (request_irq(ines_priv->pci_device->irq, ines_pci_interrupt, isr_flags,
- "pci-gpib", board)) {
- pr_err("gpib: can't request IRQ %d\n", ines_priv->pci_device->irq);
+ DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "can't request IRQ %d\n", ines_priv->pci_device->irq);
return -1;
}
ines_priv->irq = ines_priv->pci_device->irq;
@@ -846,14 +845,14 @@ static int ines_common_pci_attach(gpib_board_t *board, const gpib_board_config_t
case PCI_CHIP_QUICKLOGIC5030:
break;
default:
- pr_err("gpib: unspecified chip type? (bug)\n");
+ dev_err(board->gpib_dev, "unspecified chip type? (bug)\n");
return -1;
}
return 0;
}
-int ines_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
+int ines_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -868,7 +867,7 @@ int ines_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-int ines_pci_accel_attach(gpib_board_t *board, const gpib_board_config_t *config)
+int ines_pci_accel_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -885,7 +884,7 @@ int ines_pci_accel_attach(gpib_board_t *board, const gpib_board_config_t *config
static const int ines_isa_iosize = 0x20;
-int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
+int ines_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
@@ -899,15 +898,16 @@ int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
ines_priv = board->private_data;
nec_priv = &ines_priv->nec7210_priv;
- if (!request_region(config->ibbase, ines_isa_iosize, "ines_gpib")) {
- pr_err("ines_gpib: ioports at 0x%x already in use\n", config->ibbase);
- return -1;
+ if (!request_region(config->ibbase, ines_isa_iosize, DRV_NAME)) {
+ dev_err(board->gpib_dev, "ioports at 0x%x already in use\n",
+ config->ibbase);
+ return -EBUSY;
}
nec_priv->iobase = config->ibbase;
nec_priv->offset = 1;
nec7210_board_reset(nec_priv, board);
- if (request_irq(config->ibirq, ines_pci_interrupt, isr_flags, "ines_gpib", board)) {
- pr_err("ines_gpib: failed to allocate IRQ %d\n", config->ibirq);
+ if (request_irq(config->ibirq, ines_pci_interrupt, isr_flags, DRV_NAME, board)) {
+ dev_err(board->gpib_dev, "failed to allocate IRQ %d\n", config->ibirq);
return -1;
}
ines_priv->irq = config->ibirq;
@@ -915,7 +915,7 @@ int ines_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void ines_pci_detach(gpib_board_t *board)
+void ines_pci_detach(struct gpib_board *board)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -949,7 +949,7 @@ void ines_pci_detach(gpib_board_t *board)
ines_free_private(board);
}
-void ines_isa_detach(gpib_board_t *board)
+void ines_isa_detach(struct gpib_board *board)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -977,7 +977,7 @@ static struct pci_driver ines_pci_driver = {
.probe = &ines_pci_probe
};
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -988,13 +988,6 @@ static struct pci_driver ines_pci_driver = {
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
-#ifdef PCMCIA_DEBUG
-static int pc_debug = PCMCIA_DEBUG;
-#define DEBUG(n, args...) do {if (pc_debug > (n)) pr_debug(args)} while (0)
-#else
-#define DEBUG(args...)
-#endif
-
static const int ines_pcmcia_iosize = 0x20;
/* The event() function is this driver's Card Services event handler.
@@ -1007,11 +1000,11 @@ static const int ines_pcmcia_iosize = 0x20;
static int ines_gpib_config(struct pcmcia_device *link);
static void ines_gpib_release(struct pcmcia_device *link);
-static int ines_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ines_pcmcia_accel_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static void ines_pcmcia_detach(gpib_board_t *board);
+static int ines_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static int ines_pcmcia_accel_attach(struct gpib_board *board, const gpib_board_config_t *config);
+static void ines_pcmcia_detach(struct gpib_board *board);
static irqreturn_t ines_pcmcia_interrupt(int irq, void *arg);
-static int ines_common_pcmcia_attach(gpib_board_t *board);
+static int ines_common_pcmcia_attach(struct gpib_board *board);
/*
* A linked list of "instances" of the gpib device. Each actual
* PCMCIA card corresponds to one device instance, and is described
@@ -1043,7 +1036,7 @@ static struct pcmcia_device *curr_dev;
struct local_info {
struct pcmcia_device *p_dev;
- gpib_board_t *dev;
+ struct gpib_board *dev;
u_short manfid;
u_short cardid;
};
@@ -1063,8 +1056,6 @@ static int ines_gpib_probe(struct pcmcia_device *link)
// int ret, i;
- DEBUG(0, "%s(0x%p)\n", __func__ link);
-
/* Allocate space for private device-specific data */
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1096,9 +1087,7 @@ static int ines_gpib_probe(struct pcmcia_device *link)
static void ines_gpib_remove(struct pcmcia_device *link)
{
struct local_info *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
-
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (info->dev)
ines_pcmcia_detach(info->dev);
@@ -1125,7 +1114,6 @@ static int ines_gpib_config(struct pcmcia_device *link)
void __iomem *virt;
dev = link->priv;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
retval = pcmcia_loop_config(link, &ines_gpib_config_iteration, NULL);
if (retval) {
@@ -1134,8 +1122,8 @@ static int ines_gpib_config(struct pcmcia_device *link)
return -ENODEV;
}
- pr_debug("ines_cs: manufacturer: 0x%x card: 0x%x\n",
- link->manf_id, link->card_id);
+ dev_dbg(&link->dev, "ines_cs: manufacturer: 0x%x card: 0x%x\n",
+ link->manf_id, link->card_id);
/* for the ines card we have to setup the configuration registers in
* attribute memory here
@@ -1167,7 +1155,6 @@ static int ines_gpib_config(struct pcmcia_device *link)
ines_gpib_release(link);
return -ENODEV;
}
- pr_info("ines gpib device loaded\n");
return 0;
} /* gpib_config */
@@ -1179,18 +1166,16 @@ static int ines_gpib_config(struct pcmcia_device *link)
static void ines_gpib_release(struct pcmcia_device *link)
{
- DEBUG(0, "%s(0x%p)\n", __func__, link);
pcmcia_disable_device(link);
} /* gpib_release */
static int ines_gpib_suspend(struct pcmcia_device *link)
{
//struct local_info *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (link->open)
- pr_err("Device still open ???\n");
+ dev_err(&link->dev, "Device still open\n");
//netif_device_detach(dev);
return 0;
@@ -1199,12 +1184,10 @@ static int ines_gpib_suspend(struct pcmcia_device *link)
static int ines_gpib_resume(struct pcmcia_device *link)
{
//struct local_info_t *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
/*if (link->open) {
* ni_gpib_probe(dev); / really?
- * printk("Gpib resumed ???\n");
* //netif_device_attach(dev);
*}
*/
@@ -1229,7 +1212,6 @@ static struct pcmcia_driver ines_gpib_cs_driver = {
void ines_pcmcia_cleanup_module(void)
{
- DEBUG(0, "ines_cs: unloading\n");
pcmcia_unregister_driver(&ines_gpib_cs_driver);
}
@@ -1319,19 +1301,19 @@ static gpib_interface_t ines_pcmcia_interface = {
irqreturn_t ines_pcmcia_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
return ines_interrupt(board);
}
-int ines_common_pcmcia_attach(gpib_board_t *board)
+int ines_common_pcmcia_attach(struct gpib_board *board)
{
struct ines_priv *ines_priv;
struct nec7210_priv *nec_priv;
int retval;
if (!curr_dev) {
- pr_err("no ines pcmcia cards found\n");
+ dev_err(board->gpib_dev, "no ines pcmcia cards found\n");
return -1;
}
@@ -1343,9 +1325,9 @@ int ines_common_pcmcia_attach(gpib_board_t *board)
nec_priv = &ines_priv->nec7210_priv;
if (!request_region(curr_dev->resource[0]->start,
- resource_size(curr_dev->resource[0]), "ines_gpib")) {
- pr_err("ines_gpib: ioports at 0x%lx already in use\n",
- (unsigned long)(curr_dev->resource[0]->start));
+ resource_size(curr_dev->resource[0]), DRV_NAME)) {
+ dev_err(board->gpib_dev, "ioports at 0x%lx already in use\n",
+ (unsigned long)(curr_dev->resource[0]->start));
return -1;
}
@@ -1355,7 +1337,7 @@ int ines_common_pcmcia_attach(gpib_board_t *board)
if (request_irq(curr_dev->irq, ines_pcmcia_interrupt, IRQF_SHARED,
"pcmcia-gpib", board)) {
- pr_err("gpib: can't request IRQ %d\n", curr_dev->irq);
+ dev_err(board->gpib_dev, "can't request IRQ %d\n", curr_dev->irq);
return -1;
}
ines_priv->irq = curr_dev->irq;
@@ -1363,7 +1345,7 @@ int ines_common_pcmcia_attach(gpib_board_t *board)
return 0;
}
-int ines_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
+int ines_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -1378,7 +1360,7 @@ int ines_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-int ines_pcmcia_accel_attach(gpib_board_t *board, const gpib_board_config_t *config)
+int ines_pcmcia_accel_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct ines_priv *ines_priv;
int retval;
@@ -1393,7 +1375,7 @@ int ines_pcmcia_accel_attach(gpib_board_t *board, const gpib_board_config_t *con
return 0;
}
-void ines_pcmcia_detach(gpib_board_t *board)
+void ines_pcmcia_detach(struct gpib_board *board)
{
struct ines_priv *ines_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1410,7 +1392,7 @@ void ines_pcmcia_detach(gpib_board_t *board)
ines_free_private(board);
}
-#endif /* GPIB_PCMCIA */
+#endif /* CONFIG_GPIB_PCMCIA */
static int __init ines_init_module(void)
{
@@ -1418,63 +1400,63 @@ static int __init ines_init_module(void)
ret = pci_register_driver(&ines_pci_driver);
if (ret) {
- pr_err("ines_gpib: pci_register_driver failed: error = %d\n", ret);
+ pr_err("pci_register_driver failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&ines_pci_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci;
}
ret = gpib_register_driver(&ines_pci_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci_unaccel;
}
ret = gpib_register_driver(&ines_pci_accel_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pci_accel;
}
ret = gpib_register_driver(&ines_isa_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_isa;
}
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
ret = gpib_register_driver(&ines_pcmcia_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia;
}
ret = gpib_register_driver(&ines_pcmcia_unaccel_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia_unaccel;
}
ret = gpib_register_driver(&ines_pcmcia_accel_interface, THIS_MODULE);
if (ret) {
- pr_err("ines_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pcmcia_accel;
}
ret = pcmcia_register_driver(&ines_gpib_cs_driver);
if (ret) {
- pr_err("ines_gpib: pcmcia_register_driver failed: error = %d\n", ret);
+ pr_err("pcmcia_register_driver failed: error = %d\n", ret);
goto err_pcmcia_driver;
}
#endif
return 0;
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
err_pcmcia_driver:
gpib_unregister_driver(&ines_pcmcia_accel_interface);
err_pcmcia_accel:
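The conversion above also switches the ines driver to the kernel's pr_fmt()/dev_fmt() hooks: both are defined before any header that pulls in the printk machinery, so every pr_*() and dev_*() call in the file is prefixed with the module name automatically, which is why the hand-coded "ines_gpib:" and "gpib:" strings can be dropped from the messages. A minimal sketch of the pattern, using a hypothetical example_probe() that is not part of this patch:

/* must come before any include that drags in <linux/printk.h> */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/module.h>

static int example_probe(struct device *dev)
{
	/* both messages carry the module-name prefix without spelling it out */
	pr_err("no supported board found\n");
	dev_err(dev, "no supported board found\n");
	return -ENODEV;
}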
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
index 85322af62c23..faf96e9cc4a1 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
+++ b/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
@@ -8,6 +8,10 @@
* copyright : (C) 2011 Marcello Carla' *
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define NAME KBUILD_MODNAME
+
/* base module includes */
#include <linux/module.h>
@@ -31,8 +35,6 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for LPVO usb devices");
-#define NAME "lpvo_usb_gpib"
-
/*
* Table of devices that work with this driver.
*
@@ -55,10 +57,11 @@ MODULE_DEVICE_TABLE(usb, skel_table);
/*
* *** Diagnostics and Debug ***
- *
+ * To enable the diagnostic and debug messages, either compile with DEBUG set
+ * or control them at runtime via the dynamic debug mechanism.
* The module parameter "debug" controls the sending of debug messages to
- * syslog. By default it is set to 0 or 1 according to GPIB_CONFIG_KERNEL_DEBUG.
- * debug = 0: only register/deregister messages are generated
+ * syslog. By default it is set to 0.
+ * debug = 0: only attach/detach messages are sent
* 1: every action is logged
* 2: extended logging; each single exchanged byte is documented
* (about twice the log volume of [1])
@@ -70,11 +73,15 @@ MODULE_DEVICE_TABLE(usb, skel_table);
static int debug;
module_param(debug, int, 0644);
-#define DIA_LOG(level, format, ...) \
+#define DIA_LOG(level, format, ...) \
do { if (debug >= (level)) \
- pr_alert("%s:%s - " format, NAME, __func__, ## __VA_ARGS__); } \
+ dev_dbg(board->gpib_dev, format, ## __VA_ARGS__); } \
while (0)
+#define WQT wait_queue_entry_t
+#define WQH head
+#define WQE entry
+
/* standard and extended command sets of the usb-gpib adapter */
#define USB_GPIB_ON "\nIB\n"
@@ -135,7 +142,7 @@ struct char_buf { /* used by one_char() routine */
};
struct usb_gpib_priv { /* private data to the device */
- u8 eos; /* eos character */
+ u8 eos; /* eos character */
short eos_flags; /* eos mode */
int timeout; /* current value for timeout */
void *dev; /* the usb device private data structure */
@@ -143,42 +150,23 @@ struct usb_gpib_priv { /* private data to the device */
#define GPIB_DEV (((struct usb_gpib_priv *)board->private_data)->dev)
-#define SHOW_STATUS(board) { \
- DIA_LOG(2, "# - board %p\n", board); \
- DIA_LOG(2, "# - buffer_length %d\n", board->buffer_length); \
- DIA_LOG(2, "# - status %lx\n", board->status); \
- DIA_LOG(2, "# - use_count %d\n", board->use_count); \
- DIA_LOG(2, "# - pad %x\n", board->pad); \
- DIA_LOG(2, "# - sad %x\n", board->sad); \
- DIA_LOG(2, "# - timeout %d\n", board->usec_timeout); \
- DIA_LOG(2, "# - ppc %d\n", board->parallel_poll_configuration); \
- DIA_LOG(2, "# - t1delay %d\n", board->t1_nano_sec); \
- DIA_LOG(2, "# - online %d\n", board->online); \
- DIA_LOG(2, "# - autopoll %d\n", board->autospollers); \
- DIA_LOG(2, "# - autopoll task %p\n", board->autospoll_task); \
- DIA_LOG(2, "# - minor %d\n", board->minor); \
- DIA_LOG(2, "# - master %d\n", board->master); \
- DIA_LOG(2, "# - list %d\n", board->ist); \
- }
-/*
- * n = 0;
- * list_for_each (l, &board->device_list) n++;
- * TTY_LOG ("%s:%s - devices in list %d\n", a, b, n);
- */
-
-/*
- * TTY_LOG - write a message to the current work terminal (if any)
- */
-
-#define TTY_LOG(format, ...) { \
- char buf[128]; \
- struct tty_struct *tty = get_current_tty(); \
- if (tty) { \
- snprintf(buf, 128, format, __VA_ARGS__); \
- tty->driver->ops->write(tty, buf, strlen(buf)); \
- tty->driver->ops->write(tty, "\r", 1); \
- } \
- }
+static void show_status(struct gpib_board *board)
+{
+ DIA_LOG(2, "# - buffer_length %d\n", board->buffer_length);
+ DIA_LOG(2, "# - status %lx\n", board->status);
+ DIA_LOG(2, "# - use_count %d\n", board->use_count);
+ DIA_LOG(2, "# - pad %x\n", board->pad);
+ DIA_LOG(2, "# - sad %x\n", board->sad);
+ DIA_LOG(2, "# - timeout %d\n", board->usec_timeout);
+ DIA_LOG(2, "# - ppc %d\n", board->parallel_poll_configuration);
+ DIA_LOG(2, "# - t1delay %d\n", board->t1_nano_sec);
+ DIA_LOG(2, "# - online %d\n", board->online);
+ DIA_LOG(2, "# - autopoll %d\n", board->autospollers);
+ DIA_LOG(2, "# - autopoll task %p\n", board->autospoll_task);
+ DIA_LOG(2, "# - minor %d\n", board->minor);
+ DIA_LOG(2, "# - master %d\n", board->master);
+ DIA_LOG(2, "# - list %d\n", board->ist);
+}
/*
* GLOBAL VARIABLES: required for
@@ -200,8 +188,8 @@ static struct mutex minors_lock; /* operations on usb_minors are to be prote
struct usb_skel;
static ssize_t skel_do_write(struct usb_skel *, const char *, size_t);
static ssize_t skel_do_read(struct usb_skel *, char *, size_t);
-static int skel_do_open(gpib_board_t *, int);
-static int skel_do_release(gpib_board_t *);
+static int skel_do_open(struct gpib_board *, int);
+static int skel_do_release(struct gpib_board *);
/*
* usec_diff : take difference in MICROsec between two 'timespec'
@@ -229,27 +217,7 @@ static inline int usec_diff(struct timespec64 *a, struct timespec64 *b)
static int write_loop(void *dev, char *msg, int leng)
{
-// int nchar = 0, val;
-
-// do {
-
return skel_do_write(dev, msg, leng);
-
-// if (val < 1) {
-// printk (KERN_ALERT "%s:%s - write error: %d %d/%d\n",
-// NAME, __func__, val, nchar, leng);
-// return -EIO;
-// }
-// nchar +=val;
-// } while (nchar < leng);
-// return leng;
-}
-
-static char printable(char x)
-{
- if (x < 32 || x > 126)
- return ' ';
- return x;
}
/**
@@ -257,15 +225,15 @@ static char printable(char x)
*
* @board: the gpib_board_struct data area for this gpib interface
* @msg: the byte sequence.
- * @leng the byte sequence length; can be given as zero and is
+ * @leng: the byte sequence length; can be given as zero and is
* computed automatically, but if 'msg' contains a zero byte,
* it has to be given explicitly.
*/
-static int send_command(gpib_board_t *board, char *msg, int leng)
+static int send_command(struct gpib_board *board, char *msg, int leng)
{
char buffer[64];
- int nchar, j;
+ int nchar;
int retval;
struct timespec64 before, after;
@@ -280,17 +248,10 @@ static int send_command(gpib_board_t *board, char *msg, int leng)
nchar = skel_do_read(GPIB_DEV, buffer, 64);
if (nchar < 0) {
- DIA_LOG(0, " return from read: %d\n", nchar);
+ dev_err(board->gpib_dev, " return from read: %d\n", nchar);
return nchar;
} else if (nchar != 1) {
- for (j = 0 ; j < leng ; j++) {
- DIA_LOG(0, " Irregular reply to command: %d %x %c\n",
- j, msg[j], printable(msg[j]));
- }
- for (j = 0 ; j < nchar ; j++) {
- DIA_LOG(0, " Irregular command reply: %d %x %c\n",
- j, buffer[j] & 0xff, printable(buffer[j]));
- }
+ dev_err(board->gpib_dev, " Irregular reply to command: %s\n", msg);
return -EIO;
}
ktime_get_real_ts64 (&after);
@@ -310,7 +271,7 @@ static int send_command(gpib_board_t *board, char *msg, int leng)
*
*/
-static int set_control_line(gpib_board_t *board, int line, int value)
+static int set_control_line(struct gpib_board *board, int line, int value)
{
char msg[] = USB_GPIB_SET_LINES;
int retval;
@@ -337,11 +298,11 @@ static int set_control_line(gpib_board_t *board, int line, int value)
/*
* one_char() - read one single byte from input buffer
*
- * @board: the gpib_board_struct data area for this gpib interface
- * @char_buf: the routine private data structure
+ * @board: the gpib_board_struct data area for this gpib interface
+ * @char_buf: the routine private data structure
*/
-static int one_char(gpib_board_t *board, struct char_buf *b)
+static int one_char(struct gpib_board *board, struct char_buf *b)
{
struct timespec64 before, after;
@@ -360,13 +321,7 @@ static int one_char(gpib_board_t *board, struct char_buf *b)
if (b->nchar > 0) {
DIA_LOG(2, "--> %x\n", b->inbuf[b->last - b->nchar]);
return b->inbuf[b->last - b->nchar--];
- } else if (b->nchar == 0) {
- dev_alert(board->gpib_dev, "%s:%s - read returned EOF\n", NAME, __func__);
- return -EIO;
}
- dev_alert(board->gpib_dev, "%s:%s - read error %d\n", NAME, __func__, b->nchar);
- TTY_LOG("\n *** %s *** Read Error - %s\n", NAME,
- "Reset the adapter with 'gpib_config'\n");
return -EIO;
}
@@ -381,7 +336,7 @@ static int one_char(gpib_board_t *board, struct char_buf *b)
* not supported.
*/
-static void set_timeout(gpib_board_t *board)
+static void set_timeout(struct gpib_board *board)
{
int n, val;
char command[sizeof(USB_GPIB_TTMO) + 6];
@@ -406,12 +361,10 @@ static void set_timeout(gpib_board_t *board)
val = send_command(board, command, 0);
}
- if (val != ACK) {
- dev_alert(board->gpib_dev, "%s:%s - error in timeout set: <%s>\n",
- NAME, __func__, command);
- } else {
+ if (val != ACK)
+ dev_err(board->gpib_dev, "error in timeout set: <%s>\n", command);
+ else
data->timeout = board->usec_timeout;
- }
}
/*
@@ -431,7 +384,7 @@ static void set_timeout(gpib_board_t *board)
* detach() will be called. Always.
*/
-static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int usb_gpib_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
int retval, j;
u32 base = config->ibbase;
@@ -451,8 +404,6 @@ static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *confi
if (config->device_path) {
/* if config->device_path given, try that first */
- dev_alert(board->gpib_dev, "%s:%s - Looking for device_path: %s\n",
- NAME, __func__, config->device_path);
for (j = 0 ; j < MAX_DEV ; j++) {
if ((assigned_usb_minors & 1 << j) == 0)
continue;
@@ -487,8 +438,7 @@ static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *confi
mutex_unlock(&minors_lock);
if (j == MAX_DEV) {
- dev_alert(board->gpib_dev, "%s:%s - Requested device is not registered.\n",
- NAME, __func__);
+ dev_err(board->gpib_dev, "Requested device is not registered.\n");
return -EIO;
}
@@ -501,13 +451,13 @@ static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *confi
DIA_LOG(1, "Skel open: %d\n", retval);
if (retval) {
- TTY_LOG("%s:%s - skel open failed.\n", NAME, __func__);
+ dev_err(board->gpib_dev, "skel open failed.\n");
kfree(board->private_data);
board->private_data = NULL;
return -ENODEV;
}
- SHOW_STATUS(board);
+ show_status(board);
retval = send_command(board, USB_GPIB_ON, 0);
DIA_LOG(1, "USB_GPIB_ON returns %x\n", retval);
@@ -541,8 +491,8 @@ static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *confi
if (retval != ACK)
return -EIO;
- SHOW_STATUS(board);
- TTY_LOG("Module '%s' has been sucesfully configured\n", NAME);
+ show_status(board);
+ DIA_LOG(0, "attached\n");
return 0;
}
@@ -553,13 +503,13 @@ static int usb_gpib_attach(gpib_board_t *board, const gpib_board_config_t *confi
*
*/
-static void usb_gpib_detach(gpib_board_t *board)
+static void usb_gpib_detach(struct gpib_board *board)
{
int retval;
- SHOW_STATUS(board);
+ show_status(board);
- DIA_LOG(0, "detaching %p\n", board);
+ DIA_LOG(0, "detaching\n");
if (board->private_data) {
if (GPIB_DEV) {
@@ -573,15 +523,14 @@ static void usb_gpib_detach(gpib_board_t *board)
board->private_data = NULL;
}
- DIA_LOG(0, "done %p\n", board);
- TTY_LOG("Module '%s' has been detached\n", NAME);
+ DIA_LOG(0, "detached\n");
}
/*
* Other functions follow in alphabetical order
*/
/* command */
-static int usb_gpib_command(gpib_board_t *board,
+static int usb_gpib_command(struct gpib_board *board,
u8 *buffer,
size_t length,
size_t *bytes_written)
@@ -614,7 +563,7 @@ static int usb_gpib_command(gpib_board_t *board,
* Cannot do nothing here, but remember for future use.
*/
-static void usb_gpib_disable_eos(gpib_board_t *board)
+static void usb_gpib_disable_eos(struct gpib_board *board)
{
((struct usb_gpib_priv *)board->private_data)->eos_flags &= ~REOS;
DIA_LOG(1, "done: %x\n",
@@ -630,7 +579,7 @@ static void usb_gpib_disable_eos(gpib_board_t *board)
*
*/
-static int usb_gpib_enable_eos(gpib_board_t *board,
+static int usb_gpib_enable_eos(struct gpib_board *board,
u8 eos_byte,
int compare_8_bits)
{
@@ -650,7 +599,7 @@ static int usb_gpib_enable_eos(gpib_board_t *board,
* @board: the gpib_board data area for this gpib interface
*/
-static int usb_gpib_go_to_standby(gpib_board_t *board)
+static int usb_gpib_go_to_standby(struct gpib_board *board)
{
int retval = set_control_line(board, IB_BUS_ATN, 0);
@@ -665,14 +614,14 @@ static int usb_gpib_go_to_standby(gpib_board_t *board)
* usb_gpib_interface_clear() - Assert or de-assert IFC
*
* @board: the gpib_board data area for this gpib interface
- * assert: 1: assert IFC; 0: de-assert IFC
+ * @assert: 1: assert IFC; 0: de-assert IFC
*
* Currently on the assert request we issue the lpvo IBZ
* command that cycles IFC low for 100 usec, then we ignore
* the de-assert request.
*/
-static void usb_gpib_interface_clear(gpib_board_t *board, int assert)
+static void usb_gpib_interface_clear(struct gpib_board *board, int assert)
{
int retval = 0;
@@ -688,21 +637,16 @@ static void usb_gpib_interface_clear(gpib_board_t *board, int assert)
}
/**
- * line_status() - Read the status of the bus lines.
+ * usb_gpib_line_status() - Read the status of the bus lines.
*
* @board: the gpib_board data area for this gpib interface
*
* We can read all lines.
*/
-
-#define WQT wait_queue_entry_t
-#define WQH head
-#define WQE entry
-
-static int usb_gpib_line_status(const gpib_board_t *board)
+static int usb_gpib_line_status(const struct gpib_board *board)
{
int buffer;
- int line_status = ValidALL; /* all lines will be read */
+ int line_status = VALID_ALL; /* all lines will be read */
struct list_head *p, *q;
WQT *item;
unsigned long flags;
@@ -730,30 +674,29 @@ static int usb_gpib_line_status(const gpib_board_t *board)
msleep(sleep);
}
- buffer = send_command((gpib_board_t *)board, USB_GPIB_STATUS, 0);
+ buffer = send_command((struct gpib_board *)board, USB_GPIB_STATUS, 0);
if (buffer < 0) {
- dev_alert(board->gpib_dev, "%s:%s - line status read failed with %d\n",
- NAME, __func__, buffer);
+ dev_err(board->gpib_dev, "line status read failed with %d\n", buffer);
return -1;
}
if ((buffer & 0x01) == 0)
- line_status |= BusREN;
+ line_status |= BUS_REN;
if ((buffer & 0x02) == 0)
- line_status |= BusIFC;
+ line_status |= BUS_IFC;
if ((buffer & 0x04) == 0)
- line_status |= BusNDAC;
+ line_status |= BUS_NDAC;
if ((buffer & 0x08) == 0)
- line_status |= BusNRFD;
+ line_status |= BUS_NRFD;
if ((buffer & 0x10) == 0)
- line_status |= BusDAV;
+ line_status |= BUS_DAV;
if ((buffer & 0x20) == 0)
- line_status |= BusEOI;
+ line_status |= BUS_EOI;
if ((buffer & 0x40) == 0)
- line_status |= BusATN;
+ line_status |= BUS_ATN;
if ((buffer & 0x80) == 0)
- line_status |= BusSRQ;
+ line_status |= BUS_SRQ;
DIA_LOG(1, "done with %x %x\n", buffer, line_status);
@@ -762,7 +705,7 @@ static int usb_gpib_line_status(const gpib_board_t *board)
/* parallel_poll */
-static int usb_gpib_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int usb_gpib_parallel_poll(struct gpib_board *board, uint8_t *result)
{
/* request parallel poll asserting ATN | EOI;
* we suppose ATN already asserted
@@ -773,27 +716,23 @@ static int usb_gpib_parallel_poll(gpib_board_t *board, uint8_t *result)
DIA_LOG(1, "enter %p\n", board);
retval = set_control_line(board, IB_BUS_EOI, 1);
- if (retval != ACK) {
- dev_alert(board->gpib_dev, "%s:%s - assert EOI failed\n", NAME, __func__);
+ if (retval != ACK)
return -EIO;
- }
*result = send_command(board, USB_GPIB_READ_DATA, 0);
DIA_LOG(1, "done with %x\n", *result);
retval = set_control_line(board, IB_BUS_EOI, 0);
- if (retval != 0x06) {
- dev_alert(board->gpib_dev, "%s:%s - unassert EOI failed\n", NAME, __func__);
+ if (retval != 0x06)
return -EIO;
- }
return 0;
}
/* read */
-static int usb_gpib_read(gpib_board_t *board,
+static int usb_gpib_read(struct gpib_board *board,
u8 *buffer,
size_t length,
int *end,
@@ -866,8 +805,7 @@ static int usb_gpib_read(gpib_board_t *board,
goto read_return;
if (one_char(board, &b) != DLE || one_char(board, &b) != STX) {
- dev_alert(board->gpib_dev, "%s:%s - wrong <DLE><STX> sequence\n",
- NAME, __func__);
+ dev_err(board->gpib_dev, "wrong <DLE><STX> sequence\n");
retval = -EIO;
goto read_return;
}
@@ -907,15 +845,12 @@ static int usb_gpib_read(gpib_board_t *board,
retval = 0;
goto read_return;
} else {
- dev_alert(board->gpib_dev, "%s:%s - %s %x\n",
- NAME, __func__,
- "Wrong end of message", c);
+ dev_err(board->gpib_dev, "wrong end of message %x", c);
retval = -ETIME;
goto read_return;
}
} else {
- dev_alert(board->gpib_dev, "%s:%s - %s\n", NAME, __func__,
- "lone <DLE> in stream");
+ dev_err(board->gpib_dev, "lone <DLE> in stream");
retval = -EIO;
goto read_return;
}
@@ -934,8 +869,7 @@ static int usb_gpib_read(gpib_board_t *board,
c = one_char(board, &b);
if (c == ACK) {
if (MAX_READ_EXCESS - read_count > 1)
- dev_alert(board->gpib_dev, "%s:%s - %s\n", NAME, __func__,
- "small buffer - maybe some data lost");
+ dev_dbg(board->gpib_dev, "small buffer - maybe some data lost");
retval = 0;
goto read_return;
}
@@ -943,15 +877,13 @@ static int usb_gpib_read(gpib_board_t *board,
}
}
- dev_alert(board->gpib_dev, "%s:%s - no input end - GPIB board in odd state\n",
- NAME, __func__);
+ dev_err(board->gpib_dev, "no input end - board in odd state\n");
retval = -EIO;
read_return:
kfree(b.inbuf);
- DIA_LOG(1, "done with byte/status: %d %x %d\n",
- (int)*bytes_read, retval, *end);
+ DIA_LOG(1, "done with byte/status: %d %x %d\n", (int)*bytes_read, retval, *end);
if (retval == 0 || retval == -ETIME) {
if (send_command(board, USB_GPIB_UNTALK, sizeof(USB_GPIB_UNTALK)) == 0x06)
@@ -964,21 +896,20 @@ read_return:
/* remote_enable */
-static void usb_gpib_remote_enable(gpib_board_t *board, int enable)
+static void usb_gpib_remote_enable(struct gpib_board *board, int enable)
{
int retval;
retval = set_control_line(board, IB_BUS_REN, enable ? 1 : 0);
if (retval != ACK)
- dev_alert(board->gpib_dev, "%s:%s - could not set REN line: %x\n",
- NAME, __func__, retval);
+ dev_err(board->gpib_dev, "could not set REN line: %x\n", retval);
DIA_LOG(1, "done with %x\n", retval);
}
/* request_system_control */
-static void usb_gpib_request_system_control(gpib_board_t *board,
+static void usb_gpib_request_system_control(struct gpib_board *board,
int request_control)
{
if (request_control)
@@ -992,7 +923,7 @@ static void usb_gpib_request_system_control(gpib_board_t *board,
/* take_control */
/* beware: the sync flag is ignored; what is its real meaning? */
-static int usb_gpib_take_control(gpib_board_t *board, int sync)
+static int usb_gpib_take_control(struct gpib_board *board, int sync)
{
int retval;
@@ -1007,7 +938,7 @@ static int usb_gpib_take_control(gpib_board_t *board, int sync)
/* update_status */
-static unsigned int usb_gpib_update_status(gpib_board_t *board,
+static unsigned int usb_gpib_update_status(struct gpib_board *board,
unsigned int clear_mask)
{
/* There is nothing we can do here, I guess */
@@ -1022,7 +953,7 @@ static unsigned int usb_gpib_update_status(gpib_board_t *board,
/* write */
/* beware: DLE characters are not escaped - can only send ASCII data */
-static int usb_gpib_write(gpib_board_t *board,
+static int usb_gpib_write(struct gpib_board *board,
u8 *buffer,
size_t length,
int send_eoi,
@@ -1053,9 +984,8 @@ static int usb_gpib_write(gpib_board_t *board,
*bytes_written = length;
- if (send_command(board, USB_GPIB_UNLISTEN, sizeof(USB_GPIB_UNLISTEN))
- != 0x06)
- return -EPIPE;
+ if (send_command(board, USB_GPIB_UNLISTEN, sizeof(USB_GPIB_UNLISTEN)) != 0x06)
+ return -EPIPE;
return length;
}
@@ -1066,64 +996,56 @@ static int usb_gpib_write(gpib_board_t *board,
/* parallel_poll configure */
-static void usb_gpib_parallel_poll_configure(gpib_board_t *board,
+static void usb_gpib_parallel_poll_configure(struct gpib_board *board,
uint8_t configuration)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
}
/* parallel_poll_response */
-static void usb_gpib_parallel_poll_response(gpib_board_t *board, int ist)
+static void usb_gpib_parallel_poll_response(struct gpib_board *board, int ist)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
}
/* primary_address */
-static int usb_gpib_primary_address(gpib_board_t *board, unsigned int address)
+static int usb_gpib_primary_address(struct gpib_board *board, unsigned int address)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
return 0;
}
/* return_to_local */
-static void usb_gpib_return_to_local(gpib_board_t *board)
+static void usb_gpib_return_to_local(struct gpib_board *board)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
}
/* secondary_address */
-static int usb_gpib_secondary_address(gpib_board_t *board,
+static int usb_gpib_secondary_address(struct gpib_board *board,
unsigned int address,
int enable)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
return 0;
}
/* serial_poll_response */
-static void usb_gpib_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void usb_gpib_serial_poll_response(struct gpib_board *board, uint8_t status)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
}
/* serial_poll_status */
-static uint8_t usb_gpib_serial_poll_status(gpib_board_t *board)
+static uint8_t usb_gpib_serial_poll_status(struct gpib_board *board)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
return 0;
}
/* t1_delay */
-static unsigned int usb_gpib_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int usb_gpib_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
- dev_alert(board->gpib_dev, "%s:%s - currently a NOP\n", NAME, __func__);
return 0;
}
@@ -1181,7 +1103,7 @@ static int usb_gpib_init_module(struct usb_interface *interface)
if (!assigned_usb_minors) {
rv = gpib_register_driver(&usb_gpib_interface, THIS_MODULE);
if (rv) {
- pr_err("lpvo_usb_gpib: gpib_register_driver failed: error = %d\n", rv);
+ pr_err("gpib_register_driver failed: error = %d\n", rv);
goto exit;
}
} else {
@@ -1191,8 +1113,8 @@ static int usb_gpib_init_module(struct usb_interface *interface)
for (j = 0 ; j < MAX_DEV ; j++) {
if (usb_minors[j] == interface->minor && assigned_usb_minors & 1 << j) {
- pr_alert("%s:%s - CODE BUG: USB minor %d registered at %d.\n",
- NAME, __func__, interface->minor, j);
+ pr_err("CODE BUG: USB minor %d registered at %d.\n",
+ interface->minor, j);
rv = -1;
goto exit;
}
@@ -1207,13 +1129,11 @@ static int usb_gpib_init_module(struct usb_interface *interface)
usb_minors[j] = interface->minor;
lpvo_usb_interfaces[j] = interface;
assigned_usb_minors |= mask;
- DIA_LOG(0, "usb minor %d registered at %d\n", interface->minor, j);
rv = 0;
goto exit;
}
}
- pr_alert("%s:%s - No slot available for interface %p minor %d\n",
- NAME, __func__, interface, interface->minor);
+ pr_err("No slot available for interface %p minor %d\n", interface, interface->minor);
rv = -1;
exit:
@@ -1235,7 +1155,7 @@ static void usb_gpib_exit_module(int minor)
goto exit;
}
}
- pr_alert("%s:%s - CODE BUG: USB minor %d not found.\n", NAME, __func__, minor);
+ pr_err("CODE BUG: USB minor %d not found.\n", minor);
exit:
mutex_unlock(&minors_lock);
@@ -1267,7 +1187,7 @@ static int write_latency_timer(struct usb_device *udev)
LATENCY_TIMER, LATENCY_CHANNEL,
NULL, 0, WDR_TIMEOUT);
if (rv < 0)
- pr_alert("Unable to write latency timer: %i\n", rv);
+ dev_err(&udev->dev, "Unable to write latency timer: %i\n", rv);
return rv;
}
@@ -1363,18 +1283,15 @@ static void skel_delete(struct kref *kref)
* skel_do_open() - to be called by usb_gpib_attach
*/
-static int skel_do_open(gpib_board_t *board, int subminor)
+static int skel_do_open(struct gpib_board *board, int subminor)
{
struct usb_skel *dev;
struct usb_interface *interface;
int retval = 0;
- DIA_LOG(0, "Required minor: %d\n", subminor);
-
interface = usb_find_interface(&skel_driver, subminor);
if (!interface) {
- pr_err("%s - error, can't find device for minor %d\n",
- __func__, subminor);
+ dev_err(board->gpib_dev, "can't find device for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
@@ -1403,7 +1320,7 @@ exit:
* skel_do_release() - to be called by usb_gpib_detach
*/
-static int skel_do_release(gpib_board_t *board)
+static int skel_do_release(struct gpib_board *board)
{
struct usb_skel *dev;
@@ -1439,9 +1356,8 @@ static void skel_read_bulk_callback(struct urb *urb)
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN))
- dev_err(&dev->interface->dev,
- "%s - nonzero read bulk status received: %d\n",
- __func__, urb->status);
+ dev_err(&dev->interface->dev, "nonzero read bulk status received: %d\n",
+ urb->status);
dev->errors = urb->status;
} else {
@@ -1478,9 +1394,7 @@ static int skel_do_read_io(struct usb_skel *dev, size_t count)
/* do it */
rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
if (rv < 0) {
- dev_err(&dev->interface->dev,
- "%s - failed submitting read urb, error %d\n",
- __func__, rv);
+ dev_err(&dev->interface->dev, "failed submitting read urb, error %d\n", rv);
rv = (rv == -ENOMEM) ? rv : -EIO;
spin_lock_irq(&dev->err_lock);
dev->ongoing_read = 0;
@@ -1504,14 +1418,10 @@ static ssize_t skel_do_read(struct usb_skel *dev, char *buffer, size_t count)
if (!dev->bulk_in_urb || !count)
return 0;
- DIA_LOG(1, "enter for %zu.\n", count);
-
restart: /* added to comply with ftdi timeout technique */
/* no concurrent readers */
- DIA_LOG(2, "restart with %zd %zd.\n", dev->bulk_in_filled, dev->bulk_in_copied);
-
rv = mutex_lock_interruptible(&dev->io_mutex);
if (rv < 0)
return rv;
@@ -1527,8 +1437,6 @@ retry:
ongoing_io = dev->ongoing_read;
spin_unlock_irq(&dev->err_lock);
- DIA_LOG(2, "retry with %d.\n", ongoing_io);
-
if (ongoing_io) {
// /* nonblocking IO shall not wait */
// /* no file, no O_NONBLOCK; maybe provide when from user space */
@@ -1569,8 +1477,6 @@ retry:
// size_t chunk = min(available, count); /* compute chunk later */
size_t chunk;
- DIA_LOG(2, "we have data: %zu %zu.\n", dev->bulk_in_filled, dev->bulk_in_copied);
-
if (!available) {
/*
* all data has been used
@@ -1596,12 +1502,6 @@ retry:
*/
if (dev->bulk_in_copied) {
- int j;
-
- for (j = 0 ; j < dev->bulk_in_filled ; j++) {
- pr_alert("copy -> %x %zu %x\n",
- j, dev->bulk_in_copied, dev->bulk_in_buffer[j]);
- }
chunk = min(available, count);
memcpy(buffer, dev->bulk_in_buffer + dev->bulk_in_copied, chunk);
rv = chunk;
@@ -1613,7 +1513,7 @@ retry:
/* account for two bytes to be discarded */
chunk = min(available, count + 2);
if (chunk < 2) {
- pr_alert("BAD READ - chunk: %zu\n", chunk);
+ dev_err(&dev->udev->dev, "BAD READ - chunk: %zu\n", chunk);
rv = -EIO;
goto exit;
}
@@ -1633,8 +1533,6 @@ retry:
// if (available < count)
// skel_do_read_io(dev, dev->bulk_in_size);
} else {
- DIA_LOG(1, "no data - start read - copied: %zd.\n", dev->bulk_in_copied);
-
/* no data in the buffer */
rv = skel_do_read_io(dev, dev->bulk_in_size);
if (rv < 0)
@@ -1645,10 +1543,10 @@ retry:
exit:
mutex_unlock(&dev->io_mutex);
if (rv == 2)
- goto restart; /* ftdi chip returns two status bytes after a latency anyhow */
- DIA_LOG(1, "exit with %d.\n", rv);
+ goto restart; /* ftdi chip returns two status bytes after a latency anyhow */
+
if (rv > 0)
- return rv - 2; /* account for 2 discarded bytes in a valid buffer */
+ return rv - 2; /* account for 2 discarded bytes in a valid buffer */
return rv;
}
@@ -1669,8 +1567,7 @@ static void skel_write_bulk_callback(struct urb *urb)
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN))
dev_err(&dev->interface->dev,
- "%s - nonzero write bulk status received: %d\n",
- __func__, urb->status);
+ "nonzero write bulk status received: %d\n", urb->status);
spin_lock_irqsave(&dev->err_lock, flags);
dev->errors = urb->status;
@@ -1763,9 +1660,7 @@ static ssize_t skel_do_write(struct usb_skel *dev, const char *buffer, size_t co
retval = usb_submit_urb(urb, GFP_KERNEL);
mutex_unlock(&dev->io_mutex);
if (retval) {
- dev_err(&dev->interface->dev,
- "%s - failed submitting write urb, error %d\n",
- __func__, retval);
+ dev_err(&dev->interface->dev, "failed submitting write urb, error %d\n", retval);
goto error_unanchor;
}
@@ -1831,8 +1726,7 @@ static int skel_open(struct inode *inode, struct file *file)
interface = usb_find_interface(&skel_driver, subminor);
if (!interface) {
- pr_err("%s - error, can't find device for minor %d\n",
- __func__, subminor);
+ pr_err("can't find device for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
@@ -1895,8 +1789,6 @@ static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
rv = skel_do_read(dev, buf, count);
- pr_alert("%s - return with %zu\n", __func__, rv);
-
if (rv > 0) {
if (copy_to_user(buffer, buf, rv)) {
kfree(buf);
@@ -2015,8 +1907,8 @@ static int skel_probe(struct usb_interface *interface,
/* let the world know */
device_path = kobject_get_path(&dev->udev->dev.kobj, GFP_KERNEL);
- pr_alert("%s:%s - New lpvo_usb_device -> bus: %d dev: %d path: %s\n", NAME, __func__,
- dev->udev->bus->busnum, dev->udev->devnum, device_path);
+ dev_dbg(&interface->dev, "New lpvo_usb_device -> bus: %d dev: %d path: %s\n",
+ dev->udev->bus->busnum, dev->udev->devnum, device_path);
kfree(device_path);
#if USER_DEVICE
@@ -2029,14 +1921,9 @@ static int skel_probe(struct usb_interface *interface,
usb_set_intfdata(interface, NULL);
goto error;
}
-
- /* let the user know what node this device is now attached to */
- dev_info(&interface->dev,
- "lpvo_usb_gpib device now attached to lpvo_raw%d",
- interface->minor);
#endif
- write_latency_timer(dev->udev); /* adjust the latency timer */
+ write_latency_timer(dev->udev); /* adjust the latency timer */
usb_gpib_init_module(interface); /* last, init the lpvo for this minor */
@@ -2073,8 +1960,6 @@ static void skel_disconnect(struct usb_interface *interface)
/* decrement our usage count */
kref_put(&dev->kref, skel_delete);
-
- dev_info(&interface->dev, "USB lpvo_raw #%d now disconnected", minor);
}
static void skel_draw_down(struct usb_skel *dev)
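The DIA_LOG() rework in this file replaces pr_alert() with a dev_dbg() that is still gated on the debug module parameter; the macro therefore expects a struct gpib_board *board to be in scope at every call site, which is why the old SHOW_STATUS macro became the show_status() helper, and its output additionally depends on DEBUG or dynamic debug being enabled, as the rewritten comment notes. A condensed sketch of that shape (assuming the driver's own headers for struct gpib_board; not the exact driver code):

static int debug;
module_param(debug, int, 0644);

/* relies on a local "struct gpib_board *board" at every call site */
#define DIA_LOG(level, format, ...)					\
	do {								\
		if (debug >= (level))					\
			dev_dbg(board->gpib_dev, format, ## __VA_ARGS__); \
	} while (0)

static void show_status(struct gpib_board *board)
{
	DIA_LOG(2, "# - status %lx\n", board->status);
	DIA_LOG(2, "# - online %d\n", board->online);
}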
diff --git a/drivers/staging/gpib/nec7210/nec7210.c b/drivers/staging/gpib/nec7210/nec7210.c
index c9a837fad96e..846c0a3fa1dc 100644
--- a/drivers/staging/gpib/nec7210/nec7210.c
+++ b/drivers/staging/gpib/nec7210/nec7210.c
@@ -4,6 +4,8 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define dev_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "board.h"
#include <linux/ioport.h>
#include <linux/sched.h>
@@ -21,7 +23,7 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB library code for NEC uPD7210");
-int nec7210_enable_eos(gpib_board_t *board, struct nec7210_priv *priv, uint8_t eos_byte,
+int nec7210_enable_eos(struct gpib_board *board, struct nec7210_priv *priv, uint8_t eos_byte,
int compare_8_bits)
{
write_byte(priv, eos_byte, EOSR);
@@ -35,14 +37,14 @@ int nec7210_enable_eos(gpib_board_t *board, struct nec7210_priv *priv, uint8_t e
}
EXPORT_SYMBOL(nec7210_enable_eos);
-void nec7210_disable_eos(gpib_board_t *board, struct nec7210_priv *priv)
+void nec7210_disable_eos(struct gpib_board *board, struct nec7210_priv *priv)
{
priv->auxa_bits &= ~HR_REOS;
write_byte(priv, priv->auxa_bits, AUXMR);
}
EXPORT_SYMBOL(nec7210_disable_eos);
-int nec7210_parallel_poll(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *result)
+int nec7210_parallel_poll(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *result)
{
int ret;
@@ -62,14 +64,14 @@ int nec7210_parallel_poll(gpib_board_t *board, struct nec7210_priv *priv, uint8_
}
EXPORT_SYMBOL(nec7210_parallel_poll);
-void nec7210_parallel_poll_configure(gpib_board_t *board,
+void nec7210_parallel_poll_configure(struct gpib_board *board,
struct nec7210_priv *priv, unsigned int configuration)
{
write_byte(priv, PPR | configuration, AUXMR);
}
EXPORT_SYMBOL(nec7210_parallel_poll_configure);
-void nec7210_parallel_poll_response(gpib_board_t *board, struct nec7210_priv *priv, int ist)
+void nec7210_parallel_poll_response(struct gpib_board *board, struct nec7210_priv *priv, int ist)
{
if (ist)
write_byte(priv, AUX_SPPF, AUXMR);
@@ -83,7 +85,8 @@ EXPORT_SYMBOL(nec7210_parallel_poll_response);
* the 488.2 capability (for example with NI chips), or we need to implement the
* 488.2 set srv state machine in the driver (if that is even viable).
*/
-void nec7210_serial_poll_response(gpib_board_t *board, struct nec7210_priv *priv, uint8_t status)
+void nec7210_serial_poll_response(struct gpib_board *board,
+ struct nec7210_priv *priv, uint8_t status)
{
unsigned long flags;
@@ -100,13 +103,13 @@ void nec7210_serial_poll_response(gpib_board_t *board, struct nec7210_priv *priv
}
EXPORT_SYMBOL(nec7210_serial_poll_response);
-uint8_t nec7210_serial_poll_status(gpib_board_t *board, struct nec7210_priv *priv)
+uint8_t nec7210_serial_poll_status(struct gpib_board *board, struct nec7210_priv *priv)
{
return read_byte(priv, SPSR);
}
EXPORT_SYMBOL(nec7210_serial_poll_status);
-int nec7210_primary_address(const gpib_board_t *board, struct nec7210_priv *priv,
+int nec7210_primary_address(const struct gpib_board *board, struct nec7210_priv *priv,
unsigned int address)
{
// put primary address in address0
@@ -115,7 +118,7 @@ int nec7210_primary_address(const gpib_board_t *board, struct nec7210_priv *priv
}
EXPORT_SYMBOL(nec7210_primary_address);
-int nec7210_secondary_address(const gpib_board_t *board, struct nec7210_priv *priv,
+int nec7210_secondary_address(const struct gpib_board *board, struct nec7210_priv *priv,
unsigned int address, int enable)
{
if (enable) {
@@ -164,7 +167,7 @@ static void update_listener_state(struct nec7210_priv *priv, unsigned int addres
}
}
-unsigned int nec7210_update_status_nolock(gpib_board_t *board, struct nec7210_priv *priv)
+unsigned int nec7210_update_status_nolock(struct gpib_board *board, struct nec7210_priv *priv)
{
int address_status_bits;
u8 spoll_status;
@@ -198,7 +201,6 @@ unsigned int nec7210_update_status_nolock(gpib_board_t *board, struct nec7210_pr
priv->srq_pending = 0;
set_bit(SPOLL_NUM, &board->status);
}
-// dev_dbg(board->gpib_dev, "status 0x%x, state 0x%x\n", board->status, priv->state);
/* we rely on the interrupt handler to set the
* rest of the status bits
@@ -208,7 +210,7 @@ unsigned int nec7210_update_status_nolock(gpib_board_t *board, struct nec7210_pr
}
EXPORT_SYMBOL(nec7210_update_status_nolock);
-unsigned int nec7210_update_status(gpib_board_t *board, struct nec7210_priv *priv,
+unsigned int nec7210_update_status(struct gpib_board *board, struct nec7210_priv *priv,
unsigned int clear_mask)
{
unsigned long flags;
@@ -233,7 +235,7 @@ unsigned int nec7210_set_reg_bits(struct nec7210_priv *priv, unsigned int reg,
}
EXPORT_SYMBOL(nec7210_set_reg_bits);
-void nec7210_set_handshake_mode(gpib_board_t *board, struct nec7210_priv *priv, int mode)
+void nec7210_set_handshake_mode(struct gpib_board *board, struct nec7210_priv *priv, int mode)
{
unsigned long flags;
@@ -249,7 +251,7 @@ void nec7210_set_handshake_mode(gpib_board_t *board, struct nec7210_priv *priv,
}
EXPORT_SYMBOL(nec7210_set_handshake_mode);
-uint8_t nec7210_read_data_in(gpib_board_t *board, struct nec7210_priv *priv, int *end)
+uint8_t nec7210_read_data_in(struct gpib_board *board, struct nec7210_priv *priv, int *end)
{
unsigned long flags;
u8 data;
@@ -267,7 +269,7 @@ uint8_t nec7210_read_data_in(gpib_board_t *board, struct nec7210_priv *priv, int
}
EXPORT_SYMBOL(nec7210_read_data_in);
-int nec7210_take_control(gpib_board_t *board, struct nec7210_priv *priv, int syncronous)
+int nec7210_take_control(struct gpib_board *board, struct nec7210_priv *priv, int syncronous)
{
int i;
const int timeout = 100;
@@ -294,7 +296,7 @@ int nec7210_take_control(gpib_board_t *board, struct nec7210_priv *priv, int syn
}
EXPORT_SYMBOL(nec7210_take_control);
-int nec7210_go_to_standby(gpib_board_t *board, struct nec7210_priv *priv)
+int nec7210_go_to_standby(struct gpib_board *board, struct nec7210_priv *priv)
{
int i;
const int timeout = 1000;
@@ -319,10 +321,8 @@ int nec7210_go_to_standby(gpib_board_t *board, struct nec7210_priv *priv)
if (adsr_bits & HR_NATN)
break;
}
- if (i == HZ) {
- pr_err("nec7210: error waiting for NATN\n");
+ if (i == HZ)
return -ETIMEDOUT;
- }
}
clear_bit(COMMAND_READY_BN, &priv->state);
@@ -330,7 +330,7 @@ int nec7210_go_to_standby(gpib_board_t *board, struct nec7210_priv *priv)
}
EXPORT_SYMBOL(nec7210_go_to_standby);
-void nec7210_request_system_control(gpib_board_t *board, struct nec7210_priv *priv,
+void nec7210_request_system_control(struct gpib_board *board, struct nec7210_priv *priv,
int request_control)
{
if (request_control == 0) {
@@ -341,7 +341,7 @@ void nec7210_request_system_control(gpib_board_t *board, struct nec7210_priv *pr
}
EXPORT_SYMBOL(nec7210_request_system_control);
-void nec7210_interface_clear(gpib_board_t *board, struct nec7210_priv *priv, int assert)
+void nec7210_interface_clear(struct gpib_board *board, struct nec7210_priv *priv, int assert)
{
if (assert)
write_byte(priv, AUX_SIFC, AUXMR);
@@ -350,7 +350,7 @@ void nec7210_interface_clear(gpib_board_t *board, struct nec7210_priv *priv, int
}
EXPORT_SYMBOL(nec7210_interface_clear);
-void nec7210_remote_enable(gpib_board_t *board, struct nec7210_priv *priv, int enable)
+void nec7210_remote_enable(struct gpib_board *board, struct nec7210_priv *priv, int enable)
{
if (enable)
write_byte(priv, AUX_SREN, AUXMR);
@@ -359,7 +359,7 @@ void nec7210_remote_enable(gpib_board_t *board, struct nec7210_priv *priv, int e
}
EXPORT_SYMBOL(nec7210_remote_enable);
-void nec7210_release_rfd_holdoff(gpib_board_t *board, struct nec7210_priv *priv)
+void nec7210_release_rfd_holdoff(struct gpib_board *board, struct nec7210_priv *priv)
{
unsigned long flags;
@@ -373,8 +373,8 @@ void nec7210_release_rfd_holdoff(gpib_board_t *board, struct nec7210_priv *priv)
}
EXPORT_SYMBOL(nec7210_release_rfd_holdoff);
-unsigned int nec7210_t1_delay(gpib_board_t *board, struct nec7210_priv *priv,
- unsigned int nano_sec)
+int nec7210_t1_delay(struct gpib_board *board, struct nec7210_priv *priv,
+ unsigned int nano_sec)
{
unsigned int retval;
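
The return type here changes from unsigned int to int, presumably so the helper can report failures as negative errnos while still returning the programmed delay in nanoseconds on success. A hypothetical caller (not from this patch) would then check it like this:

/* Sketch only: error-aware use of the new int return value. */
static int example_set_t1(struct gpib_board *board, struct nec7210_priv *priv,
			  unsigned int ns)
{
	int actual_ns = nec7210_t1_delay(board, priv, ns);

	if (actual_ns < 0)
		return actual_ns;	/* negative errno from the chip layer */
	board->t1_nano_sec = actual_ns;	/* success: delay now in effect */
	return 0;
}
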
@@ -391,13 +391,13 @@ unsigned int nec7210_t1_delay(gpib_board_t *board, struct nec7210_priv *priv,
}
EXPORT_SYMBOL(nec7210_t1_delay);
-void nec7210_return_to_local(const gpib_board_t *board, struct nec7210_priv *priv)
+void nec7210_return_to_local(const struct gpib_board *board, struct nec7210_priv *priv)
{
write_byte(priv, AUX_RTL, AUXMR);
}
EXPORT_SYMBOL(nec7210_return_to_local);
-static inline short nec7210_atn_has_changed(gpib_board_t *board, struct nec7210_priv *priv)
+static inline short nec7210_atn_has_changed(struct gpib_board *board, struct nec7210_priv *priv)
{
short address_status_bits = read_byte(priv, ADSR);
@@ -415,7 +415,7 @@ static inline short nec7210_atn_has_changed(gpib_board_t *board, struct nec7210_
return -1;
}
-int nec7210_command(gpib_board_t *board, struct nec7210_priv *priv, uint8_t
+int nec7210_command(struct gpib_board *board, struct nec7210_priv *priv, uint8_t
*buffer, size_t length, size_t *bytes_written)
{
int retval = 0;
@@ -430,17 +430,14 @@ int nec7210_command(gpib_board_t *board, struct nec7210_priv *priv, uint8_t
test_bit(COMMAND_READY_BN, &priv->state) ||
test_bit(BUS_ERROR_BN, &priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib command wait interrupted\n");
+ dev_dbg(board->gpib_dev, "command wait interrupted\n");
retval = -ERESTARTSYS;
break;
}
if (test_bit(TIMO_NUM, &board->status))
break;
- if (test_and_clear_bit(BUS_ERROR_BN, &priv->state)) {
- pr_err("nec7210: bus error on command byte\n");
+ if (test_and_clear_bit(BUS_ERROR_BN, &priv->state))
break;
- }
-
spin_lock_irqsave(&board->spinlock, flags);
clear_bit(COMMAND_READY_BN, &priv->state);
write_byte(priv, buffer[*bytes_written], CDOR);
@@ -454,24 +451,20 @@ int nec7210_command(gpib_board_t *board, struct nec7210_priv *priv, uint8_t
// wait for last byte to get sent
if (wait_event_interruptible(board->wait, test_bit(COMMAND_READY_BN, &priv->state) ||
test_bit(BUS_ERROR_BN, &priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib command wait interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
- if (test_bit(TIMO_NUM, &board->status)) {
- dev_dbg(board->gpib_dev, "gpib command timed out\n");
+
+ if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
- }
- if (test_and_clear_bit(BUS_ERROR_BN, &priv->state)) {
- pr_err("nec7210: bus error on command byte\n");
+
+ if (test_and_clear_bit(BUS_ERROR_BN, &priv->state))
retval = -EIO;
- }
return retval;
}
EXPORT_SYMBOL(nec7210_command);
-static int pio_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+static int pio_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -484,7 +477,6 @@ static int pio_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buf
test_bit(READ_READY_BN, &priv->state) ||
test_bit(DEV_CLEAR_BN, &priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "nec7210: pio read wait interrupted\n");
retval = -ERESTARTSYS;
break;
}
@@ -503,12 +495,10 @@ static int pio_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buf
break;
}
if (test_bit(TIMO_NUM, &board->status)) {
- dev_dbg(board->gpib_dev, "interrupted by timeout\n");
retval = -ETIMEDOUT;
break;
}
if (test_bit(DEV_CLEAR_BN, &priv->state)) {
- dev_dbg(board->gpib_dev, "interrupted by device clear\n");
retval = -EINTR;
break;
}
@@ -523,7 +513,7 @@ static int pio_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buf
}
#ifdef NEC_DMA
-static ssize_t __dma_read(gpib_board_t *board, struct nec7210_priv *priv, size_t length)
+static ssize_t __dma_read(struct gpib_board *board, struct nec7210_priv *priv, size_t length)
{
ssize_t retval = 0;
size_t count = 0;
@@ -557,10 +547,9 @@ static ssize_t __dma_read(gpib_board_t *board, struct nec7210_priv *priv, size_t
if (wait_event_interruptible(board->wait,
test_bit(DMA_READ_IN_PROGRESS_BN, &priv->state) == 0 ||
test_bit(DEV_CLEAR_BN, &priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "nec7210: dma read wait interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_bit(DEV_CLEAR_BN, &priv->state))
@@ -579,7 +568,7 @@ static ssize_t __dma_read(gpib_board_t *board, struct nec7210_priv *priv, size_t
return retval ? retval : count;
}
-static ssize_t dma_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+static ssize_t dma_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length)
{
size_t remain = length;
@@ -606,7 +595,7 @@ static ssize_t dma_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t
}
#endif
-int nec7210_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+int nec7210_read(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -627,7 +616,7 @@ int nec7210_read(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer
}
EXPORT_SYMBOL(nec7210_read);
-static int pio_write_wait(gpib_board_t *board, struct nec7210_priv *priv,
+static int pio_write_wait(struct gpib_board *board, struct nec7210_priv *priv,
short wake_on_lacs, short wake_on_atn, short wake_on_bus_error)
{
// wait until byte is ready to be sent
@@ -638,26 +627,22 @@ static int pio_write_wait(gpib_board_t *board, struct nec7210_priv *priv,
(wake_on_bus_error && test_bit(BUS_ERROR_BN, &priv->state)) ||
(wake_on_lacs && test_bit(LACS_NUM, &board->status)) ||
(wake_on_atn && test_bit(ATN_NUM, &board->status)) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
- if (test_bit(TIMO_NUM, &board->status)) {
- dev_dbg(board->gpib_dev, "nec7210: write timed out\n");
+
+ if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
- }
- if (test_bit(DEV_CLEAR_BN, &priv->state)) {
- dev_dbg(board->gpib_dev, "nec7210: write interrupted by clear\n");
+
+ if (test_bit(DEV_CLEAR_BN, &priv->state))
return -EINTR;
- }
- if (wake_on_bus_error && test_and_clear_bit(BUS_ERROR_BN, &priv->state)) {
- dev_dbg(board->gpib_dev, "nec7210: bus error on write\n");
+
+ if (wake_on_bus_error && test_and_clear_bit(BUS_ERROR_BN, &priv->state))
return -EIO;
- }
+
return 0;
}
-static int pio_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+static int pio_write(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length, size_t *bytes_written)
{
size_t last_count = 0;
@@ -677,7 +662,6 @@ static int pio_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *bu
if (retval == -EIO) {
/* resend last byte on bus error */
*bytes_written = last_count;
- dev_dbg(board->gpib_dev, "resending %c\n", buffer[*bytes_written]);
/* we can get unrecoverable bus errors,
* so give up after a while
*/
@@ -701,7 +685,7 @@ static int pio_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *bu
}
#ifdef NEC_DMA
-static ssize_t __dma_write(gpib_board_t *board, struct nec7210_priv *priv, dma_addr_t address,
+static ssize_t __dma_write(struct gpib_board *board, struct nec7210_priv *priv, dma_addr_t address,
size_t length)
{
unsigned long flags, dma_irq_flags;
@@ -733,10 +717,9 @@ static ssize_t __dma_write(gpib_board_t *board, struct nec7210_priv *priv, dma_a
test_bit(DMA_WRITE_IN_PROGRESS_BN, &priv->state) == 0 ||
test_bit(BUS_ERROR_BN, &priv->state) ||
test_bit(DEV_CLEAR_BN, &priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted!\n");
+ test_bit(TIMO_NUM, &board->status)))
retval = -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
retval = -ETIMEDOUT;
if (test_and_clear_bit(DEV_CLEAR_BN, &priv->state))
@@ -759,7 +742,7 @@ static ssize_t __dma_write(gpib_board_t *board, struct nec7210_priv *priv, dma_a
return retval ? retval : length;
}
-static ssize_t dma_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer,
+static ssize_t dma_write(struct gpib_board *board, struct nec7210_priv *priv, uint8_t *buffer,
size_t length)
{
size_t remain = length;
@@ -783,8 +766,9 @@ static ssize_t dma_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t
return length - remain;
}
#endif
-int nec7210_write(gpib_board_t *board, struct nec7210_priv *priv, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written)
+int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv,
+ uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
int retval = 0;
@@ -845,7 +829,7 @@ EXPORT_SYMBOL(nec7210_write);
/*
* interrupt service routine
*/
-irqreturn_t nec7210_interrupt(gpib_board_t *board, struct nec7210_priv *priv)
+irqreturn_t nec7210_interrupt(struct gpib_board *board, struct nec7210_priv *priv)
{
int status1, status2;
@@ -857,7 +841,7 @@ irqreturn_t nec7210_interrupt(gpib_board_t *board, struct nec7210_priv *priv)
}
EXPORT_SYMBOL(nec7210_interrupt);
-irqreturn_t nec7210_interrupt_have_status(gpib_board_t *board,
+irqreturn_t nec7210_interrupt_have_status(struct gpib_board *board,
struct nec7210_priv *priv, int status1, int status2)
{
#ifdef NEC_DMA
@@ -937,13 +921,8 @@ irqreturn_t nec7210_interrupt_have_status(gpib_board_t *board,
set_bit(COMMAND_READY_BN, &priv->state);
// command pass through received
- if (status1 & HR_CPT) {
- unsigned int command;
-
- command = read_byte(priv, CPTR) & gpib_command_mask;
+ if (status1 & HR_CPT)
write_byte(priv, AUX_NVAL, AUXMR);
-// printk("gpib: command pass through 0x%x\n", command);
- }
if (status1 & HR_ERR)
set_bit(BUS_ERROR_BN, &priv->state);
@@ -980,7 +959,7 @@ irqreturn_t nec7210_interrupt_have_status(gpib_board_t *board,
}
EXPORT_SYMBOL(nec7210_interrupt_have_status);
-void nec7210_board_reset(struct nec7210_priv *priv, const gpib_board_t *board)
+void nec7210_board_reset(struct nec7210_priv *priv, const struct gpib_board *board)
{
/* 7210 chip reset */
write_byte(priv, AUX_CR, AUXMR);
@@ -1014,7 +993,7 @@ void nec7210_board_reset(struct nec7210_priv *priv, const gpib_board_t *board)
}
EXPORT_SYMBOL(nec7210_board_reset);
-void nec7210_board_online(struct nec7210_priv *priv, const gpib_board_t *board)
+void nec7210_board_online(struct nec7210_priv *priv, const struct gpib_board *board)
{
/* set GPIB address */
nec7210_primary_address(board, priv, board->pad);
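
Most of the churn in this file is the mechanical conversion from the gpib_board_t typedef to the bare struct gpib_board tag, in line with kernel style guidance against typedef'ing plain structs; the same conversion continues in ni_usb_gpib.c below. A reduced before/after illustration using one of the prototypes above:

/* Before: a typedef hides the struct keyword. */
typedef struct gpib_board gpib_board_t;
int nec7210_enable_eos(gpib_board_t *board, struct nec7210_priv *priv,
		       uint8_t eos_byte, int compare_8_bits);

/* After: the struct tag is spelled out, no typedef needed. */
int nec7210_enable_eos(struct gpib_board *board, struct nec7210_priv *priv,
		       uint8_t eos_byte, int compare_8_bits);
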
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
index d0656dc520f5..14f7049a8e5e 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
+++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
@@ -5,6 +5,10 @@
* copyright : (C) 2004 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -20,7 +24,7 @@ MODULE_DESCRIPTION("GPIB driver for National Instruments USB devices");
static struct usb_interface *ni_usb_driver_interfaces[MAX_NUM_NI_USB_INTERFACES];
static int ni_usb_parse_status_block(const u8 *buffer, struct ni_usb_status_block *status);
-static int ni_usb_set_interrupt_monitor(gpib_board_t *board, unsigned int monitored_bits);
+static int ni_usb_set_interrupt_monitor(struct gpib_board *board, unsigned int monitored_bits);
static void ni_usb_stop(struct ni_usb_priv *ni_priv);
static DEFINE_MUTEX(ni_usb_hotplug_lock);
@@ -75,7 +79,7 @@ static unsigned short ni_usb_timeout_code(unsigned int usec)
*/
else if (usec <= 1000000000)
return 0x02;
- pr_err("%s: bug? usec is greater than 1e9\n", __func__);
+ pr_err("bug? usec is greater than 1e9\n");
return 0xf0;
}
@@ -83,8 +87,6 @@ static void ni_usb_bulk_complete(struct urb *urb)
{
struct ni_usb_urb_ctx *context = urb->context;
-// printk("debug: %s: status=0x%x, error_count=%i, actual_length=%i\n", __func__,
-// urb->status, urb->error_count, urb->actual_length);
complete(&context->complete);
}
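
The completion callback above is one half of the submit-and-wait pattern these bulk helpers use. A condensed sketch of the round trip, with hypothetical names (the real driver also arms a bulk_timer whose handler sets timed_out, which is why the waiter kills the URB on timeout):

#include <linux/completion.h>
#include <linux/usb.h>

struct example_urb_ctx {		/* loosely mirrors ni_usb_urb_ctx */
	struct completion complete;
	int timed_out;			/* set by a timeout timer elsewhere */
};

static void example_bulk_complete(struct urb *urb)
{
	struct example_urb_ctx *ctx = urb->context;

	complete(&ctx->complete);	/* wake the waiter below */
}

static int example_bulk_roundtrip(struct urb *urb, struct example_urb_ctx *ctx)
{
	int retval = usb_submit_urb(urb, GFP_KERNEL);

	if (retval)
		return retval;
	wait_for_completion(&ctx->complete);	/* example_bulk_complete() fires */
	if (ctx->timed_out) {
		usb_kill_urb(urb);		/* cancel the transfer on timeout */
		return -ETIMEDOUT;
	}
	return urb->status;			/* 0 on success, USB error otherwise */
}
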
@@ -137,8 +139,8 @@ static int ni_usb_nonblocking_send_bulk_msg(struct ni_usb_priv *ni_priv, void *d
del_timer_sync(&ni_priv->bulk_timer);
usb_free_urb(ni_priv->bulk_urb);
ni_priv->bulk_urb = NULL;
- dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit bulk out urb, retval=%i\n",
+ retval);
mutex_unlock(&ni_priv->bulk_transfer_lock);
return retval;
}
@@ -146,7 +148,7 @@ static int ni_usb_nonblocking_send_bulk_msg(struct ni_usb_priv *ni_priv, void *d
wait_for_completion(&context->complete); // wait for ni_usb_bulk_complete
if (context->timed_out) {
usb_kill_urb(ni_priv->bulk_urb);
- dev_err(&usb_dev->dev, "%s: killed urb due to timeout\n", __func__);
+ dev_err(&usb_dev->dev, "killed urb due to timeout\n");
retval = -ETIMEDOUT;
} else {
retval = ni_priv->bulk_urb->status;
@@ -218,14 +220,12 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
if (timeout_msecs)
mod_timer(&ni_priv->bulk_timer, jiffies + msecs_to_jiffies(timeout_msecs));
- //printk("%s: submitting urb\n", __func__);
retval = usb_submit_urb(ni_priv->bulk_urb, GFP_KERNEL);
if (retval) {
del_timer_sync(&ni_priv->bulk_timer);
usb_free_urb(ni_priv->bulk_urb);
ni_priv->bulk_urb = NULL;
- dev_err(&usb_dev->dev, "%s: failed to submit bulk out urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit bulk in urb, retval=%i\n", retval);
mutex_unlock(&ni_priv->bulk_transfer_lock);
return retval;
}
@@ -250,7 +250,7 @@ static int ni_usb_nonblocking_receive_bulk_msg(struct ni_usb_priv *ni_priv,
}
if (context->timed_out) {
usb_kill_urb(ni_priv->bulk_urb);
- dev_err(&usb_dev->dev, "%s: killed urb due to timeout\n", __func__);
+ dev_err(&usb_dev->dev, "killed urb due to timeout\n");
retval = -ETIMEDOUT;
} else {
if (ni_priv->bulk_urb->status)
@@ -310,7 +310,7 @@ static int ni_usb_receive_control_msg(struct ni_usb_priv *ni_priv, __u8 request,
return retval;
}
-static void ni_usb_soft_update_status(gpib_board_t *board, unsigned int ni_usb_ibsta,
+static void ni_usb_soft_update_status(struct gpib_board *board, unsigned int ni_usb_ibsta,
unsigned int clear_mask)
{
static const unsigned int ni_usb_ibsta_mask = SRQI | ATN | CIC | REM | LACS | TACS | LOK;
@@ -330,14 +330,14 @@ static void ni_usb_soft_update_status(gpib_board_t *board, unsigned int ni_usb_i
ni_priv->monitored_ibsta_bits &= ~ni_usb_ibsta;
need_monitoring_bits &= ~ni_priv->monitored_ibsta_bits; /* mm - monitored set */
spin_unlock_irqrestore(&board->spinlock, flags);
- dev_dbg(&usb_dev->dev, "%s: need_monitoring_bits=0x%x\n", __func__, need_monitoring_bits);
+ dev_dbg(&usb_dev->dev, "need_monitoring_bits=0x%x\n", need_monitoring_bits);
if (need_monitoring_bits & ~ni_usb_ibsta)
ni_usb_set_interrupt_monitor(board, ni_usb_ibsta_monitor_mask);
else if (need_monitoring_bits & ni_usb_ibsta)
wake_up_interruptible(&board->wait);
- dev_dbg(&usb_dev->dev, "%s: ni_usb_ibsta=0x%x\n", __func__, ni_usb_ibsta);
+ dev_dbg(&usb_dev->dev, "ibsta=0x%x\n", ni_usb_ibsta);
}
static int ni_usb_parse_status_block(const u8 *buffer, struct ni_usb_status_block *status)
@@ -371,7 +371,7 @@ static int ni_usb_parse_register_read_block(const u8 *raw_data, unsigned int *re
int k;
if (raw_data[i++] != NIUSB_REGISTER_READ_DATA_START_ID) {
- pr_err("%s: parse error: wrong start id\n", __func__);
+ pr_err("parse error: wrong start id\n");
unexpected = 1;
}
for (k = 0; k < results_per_chunk && j < num_results; ++k)
@@ -380,18 +380,18 @@ static int ni_usb_parse_register_read_block(const u8 *raw_data, unsigned int *re
while (i % 4)
i++;
if (raw_data[i++] != NIUSB_REGISTER_READ_DATA_END_ID) {
- pr_err("%s: parse error: wrong end id\n", __func__);
+ pr_err("parse error: wrong end id\n");
unexpected = 1;
}
if (raw_data[i++] % results_per_chunk != num_results % results_per_chunk) {
- pr_err("%s: parse error: wrong count=%i for NIUSB_REGISTER_READ_DATA_END\n",
- __func__, (int)raw_data[i - 1]);
+ pr_err("parse error: wrong count=%i for NIUSB_REGISTER_READ_DATA_END\n",
+ (int)raw_data[i - 1]);
unexpected = 1;
}
while (i % 4) {
if (raw_data[i++] != 0) {
- pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n",
- __func__, i - 1, (int)raw_data[i - 1]);
+ pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n",
+ i - 1, (int)raw_data[i - 1]);
unexpected = 1;
}
}
@@ -408,9 +408,8 @@ static int ni_usb_parse_termination_block(const u8 *buffer)
buffer[i++] != 0x0 ||
buffer[i++] != 0x0 ||
buffer[i++] != 0x0) {
- pr_err("%s: received unexpected termination block\n", __func__);
- pr_err(" expected: 0x%x 0x%x 0x%x 0x%x\n",
- NIUSB_TERM_ID, 0x0, 0x0, 0x0);
+ pr_err("received unexpected termination block\n");
+ pr_err(" expected: 0x%x 0x%x 0x%x 0x%x\n", NIUSB_TERM_ID, 0x0, 0x0, 0x0);
pr_err(" received: 0x%x 0x%x 0x%x 0x%x\n",
buffer[i - 4], buffer[i - 3], buffer[i - 2], buffer[i - 1]);
}
@@ -427,7 +426,6 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl
int i = 0;
int j = 0;
int k;
- unsigned int adr1_bits;
int num_data_blocks = 0;
struct ni_usb_status_block register_write_status;
int unexpected = 0;
@@ -438,12 +436,12 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl
} else if (raw_data[i] == NIUSB_IBRD_EXTENDED_DATA_ID) {
data_block_length = ibrd_extended_data_block_length;
if (raw_data[++i] != 0) {
- pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n",
- __func__, i, (int)raw_data[i]);
+ pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n",
+ i, (int)raw_data[i]);
unexpected = 1;
}
} else {
- pr_err("%s: logic bug!\n", __func__);
+ pr_err("Unexpected NIUSB_IBRD ID\n");
return -EINVAL;
}
++i;
@@ -457,10 +455,10 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl
}
i += ni_usb_parse_status_block(&raw_data[i], status);
if (status->id != NIUSB_IBRD_STATUS_ID) {
- pr_err("%s: bug: status->id=%i, != ibrd_status_id\n", __func__, status->id);
+ pr_err("bug: status->id=%i, != ibrd_status_id\n", status->id);
return -EIO;
}
- adr1_bits = raw_data[i++];
+ i++;
if (num_data_blocks) {
*actual_bytes_read = (num_data_blocks - 1) * data_block_length + raw_data[i++];
} else {
@@ -468,29 +466,28 @@ static int parse_board_ibrd_readback(const u8 *raw_data, struct ni_usb_status_bl
*actual_bytes_read = 0;
}
if (*actual_bytes_read > j)
- pr_err("%s: bug: discarded data. actual_bytes_read=%i, j=%i\n",
- __func__, *actual_bytes_read, j);
+ pr_err("bug: discarded data. actual_bytes_read=%i, j=%i\n", *actual_bytes_read, j);
for (k = 0; k < 2; k++)
if (raw_data[i++] != 0) {
- pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n",
- __func__, i - 1, (int)raw_data[i - 1]);
+ pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n",
+ i - 1, (int)raw_data[i - 1]);
unexpected = 1;
}
i += ni_usb_parse_status_block(&raw_data[i], &register_write_status);
if (register_write_status.id != NIUSB_REG_WRITE_ID) {
- pr_err("%s: unexpected data: register write status id=0x%x, expected 0x%x\n",
- __func__, register_write_status.id, NIUSB_REG_WRITE_ID);
+ pr_err("unexpected data: register write status id=0x%x, expected 0x%x\n",
+ register_write_status.id, NIUSB_REG_WRITE_ID);
unexpected = 1;
}
if (raw_data[i++] != 2) {
- pr_err("%s: unexpected data: register write count=%i, expected 2\n",
- __func__, (int)raw_data[i - 1]);
+ pr_err("unexpected data: register write count=%i, expected 2\n",
+ (int)raw_data[i - 1]);
unexpected = 1;
}
for (k = 0; k < 3; k++)
if (raw_data[i++] != 0) {
- pr_err("%s: unexpected data: raw_data[%i]=0x%x, expected 0\n",
- __func__, i - 1, (int)raw_data[i - 1]);
+ pr_err("unexpected data: raw_data[%i]=0x%x, expected 0\n",
+ i - 1, (int)raw_data[i - 1]);
unexpected = 1;
}
i += ni_usb_parse_termination_block(&raw_data[i]);
@@ -530,18 +527,14 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
out_data_length = num_writes * bytes_per_write + 0x10;
out_data = kmalloc(out_data_length, GFP_KERNEL);
- if (!out_data) {
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
+ if (!out_data)
return -ENOMEM;
- }
i += ni_usb_bulk_register_write_header(&out_data[i], num_writes);
for (j = 0; j < num_writes; j++)
i += ni_usb_bulk_register_write(&out_data[i], writes[j]);
while (i % 4)
out_data[i++] = 0x00;
i += ni_usb_bulk_termination(&out_data[i]);
- if (i > out_data_length)
- dev_err(&usb_dev->dev, "%s: bug! buffer overrun\n", __func__);
mutex_lock(&ni_priv->addressed_transfer_lock);
@@ -549,22 +542,21 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
kfree(out_data);
if (retval) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
if (!in_data) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
return -ENOMEM;
}
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0);
if (retval || bytes_read != 16) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
ni_usb_dump_raw_block(in_data, bytes_read);
kfree(in_data);
return retval;
@@ -576,18 +568,16 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
//FIXME parse extra 09 status bits and termination
kfree(in_data);
if (status.id != NIUSB_REG_WRITE_ID) {
- dev_err(&usb_dev->dev, "%s: parse error, id=0x%x != NIUSB_REG_WRITE_ID\n",
- __func__, status.id);
+ dev_err(&usb_dev->dev, "parse error, id=0x%x != NIUSB_REG_WRITE_ID\n", status.id);
return -EIO;
}
if (status.error_code) {
- dev_err(&usb_dev->dev, "%s: nonzero error code 0x%x\n",
- __func__, status.error_code);
+ dev_err(&usb_dev->dev, "nonzero error code 0x%x\n", status.error_code);
return -EIO;
}
if (reg_writes_completed != num_writes) {
- dev_err(&usb_dev->dev, "%s: reg_writes_completed=%i, num_writes=%i\n",
- __func__, reg_writes_completed, num_writes);
+ dev_err(&usb_dev->dev, "reg_writes_completed=%i, num_writes=%i\n",
+ reg_writes_completed, num_writes);
return -EIO;
}
if (ibsta)
@@ -596,12 +586,12 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
}
// interface functions
-static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int ni_usb_read(struct gpib_board *board, uint8_t *buffer, size_t length,
int *end, size_t *bytes_read)
{
int retval, parse_retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x20;
int in_data_length;
@@ -614,10 +604,11 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
struct ni_usb_register reg;
*bytes_read = 0;
- if (length > max_read_length) {
- length = max_read_length;
- dev_err(&usb_dev->dev, "%s: read length too long\n", __func__);
- }
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ if (length > max_read_length)
+ return -EINVAL;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
return -ENOMEM;
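
The new !ni_priv->bus_interface checks, repeated in most entry points below, bail out before interface_to_usbdev() can be called on an interface pointer that was cleared when the adapter disconnected. The shape of the guard, as a sketch with a made-up entry point:

/* Sketch: how the ni_usb entry points now begin. */
static int example_entry_point(struct gpib_board *board)
{
	struct ni_usb_priv *ni_priv = board->private_data;
	struct usb_device *usb_dev;

	if (!ni_priv->bus_interface)
		return -ENODEV;		/* adapter already unplugged */
	usb_dev = interface_to_usbdev(ni_priv->bus_interface);

	dev_dbg(&usb_dev->dev, "still connected, safe to touch the hardware\n");
	return 0;
}
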
@@ -649,8 +640,8 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
if (retval || usb_bytes_written != i) {
if (retval == 0)
retval = -EIO;
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n",
- __func__, retval, usb_bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n",
+ retval, usb_bytes_written, i);
mutex_unlock(&ni_priv->addressed_transfer_lock);
return retval;
}
@@ -668,8 +659,8 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
if (retval == -ERESTARTSYS) {
} else if (retval) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, usb_bytes_read=%i\n",
- __func__, retval, usb_bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, usb_bytes_read=%i\n",
+ retval, usb_bytes_read);
kfree(in_data);
return retval;
}
@@ -677,14 +668,14 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
if (parse_retval != usb_bytes_read) {
if (parse_retval >= 0)
parse_retval = -EIO;
- dev_err(&usb_dev->dev, "%s: retval=%i usb_bytes_read=%i\n",
- __func__, parse_retval, usb_bytes_read);
+ dev_err(&usb_dev->dev, "retval=%i usb_bytes_read=%i\n",
+ parse_retval, usb_bytes_read);
kfree(in_data);
return parse_retval;
}
if (actual_length != length - status.count) {
- dev_err(&usb_dev->dev, "%s: actual_length=%i expected=%li\n",
- __func__, actual_length, (long)(length - status.count));
+ dev_err(&usb_dev->dev, "actual_length=%i expected=%li\n",
+ actual_length, (long)(length - status.count));
ni_usb_dump_raw_block(in_data, usb_bytes_read);
}
kfree(in_data);
@@ -699,7 +690,7 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
break;
case NIUSB_ATN_STATE_ERROR:
retval = -EIO;
- dev_err(&usb_dev->dev, "%s: read when ATN set\n", __func__);
+ dev_err(&usb_dev->dev, "read when ATN set\n");
break;
case NIUSB_ADDRESSING_ERROR:
retval = -EIO;
@@ -708,12 +699,11 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
retval = -ETIMEDOUT;
break;
case NIUSB_EOSMODE_ERROR:
- dev_err(&usb_dev->dev, "%s: driver bug, we should have been able to avoid NIUSB_EOSMODE_ERROR.\n",
- __func__);
+ dev_err(&usb_dev->dev, "driver bug, we should have been able to avoid NIUSB_EOSMODE_ERROR.\n");
retval = -EINVAL;
break;
default:
- dev_err(&usb_dev->dev, "%s: unknown error code=%i\n", __func__, status.error_code);
+ dev_err(&usb_dev->dev, "unknown error code=%i\n", status.error_code);
retval = -EIO;
break;
}
@@ -726,12 +716,12 @@ static int ni_usb_read(gpib_board_t *board, uint8_t *buffer, size_t length,
return retval;
}
-static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int ni_usb_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
int out_data_length;
static const int in_data_length = 0x10;
@@ -741,12 +731,11 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
struct ni_usb_status_block status;
static const int max_write_length = 0xffff;
- *bytes_written = 0;
- if (length > max_write_length) {
- length = max_write_length;
- send_eoi = 0;
- dev_err(&usb_dev->dev, "%s: write length too long\n", __func__);
- }
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ if (length > max_write_length)
+ return -EINVAL;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data_length = length + 0x10;
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
@@ -777,8 +766,8 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
kfree(out_data);
if (retval || usb_bytes_written != i) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n",
- __func__, retval, usb_bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, usb_bytes_written=%i, i=%i\n",
+ retval, usb_bytes_written, i);
return retval;
}
@@ -793,8 +782,8 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
mutex_unlock(&ni_priv->addressed_transfer_lock);
if ((retval && retval != -ERESTARTSYS) || usb_bytes_read != 12) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, usb_bytes_read=%i\n",
- __func__, retval, usb_bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, usb_bytes_read=%i\n",
+ retval, usb_bytes_read);
kfree(in_data);
return retval;
}
@@ -810,8 +799,8 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
*/
break;
case NIUSB_ADDRESSING_ERROR:
- dev_err(&usb_dev->dev, "%s: Addressing error retval %d error code=%i\n",
- __func__, retval, status.error_code);
+ dev_err(&usb_dev->dev, "Addressing error retval %d error code=%i\n",
+ retval, status.error_code);
retval = -ENXIO;
break;
case NIUSB_NO_LISTENER_ERROR:
@@ -821,8 +810,7 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
retval = -ETIMEDOUT;
break;
default:
- dev_err(&usb_dev->dev, "%s: unknown error code=%i\n",
- __func__, status.error_code);
+ dev_err(&usb_dev->dev, "unknown error code=%i\n", status.error_code);
retval = -EPIPE;
break;
}
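
Since ni_usb_write() now rejects requests larger than max_write_length (0xffff) with -EINVAL instead of silently truncating them and clearing send_eoi, a caller that needs to move more data than that would have to split the transfer itself. A hypothetical helper, only to illustrate the new contract:

/* Sketch: push a large buffer through ni_usb_write() in bounded chunks. */
static int example_write_all(struct gpib_board *board, uint8_t *buf,
			     size_t len, int send_eoi, size_t *written)
{
	*written = 0;

	while (*written < len) {
		size_t chunk = min_t(size_t, len - *written, 0xffff);
		int last = (*written + chunk == len);
		size_t n = 0;
		int ret = ni_usb_write(board, buf + *written, chunk,
				       last ? send_eoi : 0, &n);

		*written += n;
		if (ret)
			return ret;
	}
	return 0;
}
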
@@ -831,12 +819,12 @@ static int ni_usb_write(gpib_board_t *board, uint8_t *buffer, size_t length,
return retval;
}
-static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int ni_usb_command_chunk(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *command_bytes_written)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
int out_data_length;
static const int in_data_length = 0x10;
@@ -848,8 +836,11 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len
static const int max_command_length = 0x10;
*command_bytes_written = 0;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
if (length > max_command_length)
length = max_command_length;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data_length = length + 0x10;
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
@@ -873,8 +864,8 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len
kfree(out_data);
if (retval || bytes_written != i) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
@@ -890,8 +881,8 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len
mutex_unlock(&ni_priv->addressed_transfer_lock);
if ((retval && retval != -ERESTARTSYS) || bytes_read != 12) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return retval;
}
@@ -909,19 +900,19 @@ static int ni_usb_command_chunk(gpib_board_t *board, uint8_t *buffer, size_t len
case NIUSB_NO_BUS_ERROR:
return -ENOTCONN;
case NIUSB_EOSMODE_ERROR:
- dev_err(&usb_dev->dev, "%s: got eosmode error. Driver bug?\n", __func__);
+ dev_err(&usb_dev->dev, "got eosmode error. Driver bug?\n");
return -EIO;
case NIUSB_TIMEOUT_ERROR:
return -ETIMEDOUT;
default:
- dev_err(&usb_dev->dev, "%s: unknown error code=%i\n", __func__, status.error_code);
+ dev_err(&usb_dev->dev, "unknown error code=%i\n", status.error_code);
return -EIO;
}
ni_usb_soft_update_status(board, status.ibsta, 0);
return 0;
}
-static int ni_usb_command(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int ni_usb_command(struct gpib_board *board, uint8_t *buffer, size_t length,
size_t *bytes_written)
{
size_t count;
@@ -938,11 +929,11 @@ static int ni_usb_command(gpib_board_t *board, uint8_t *buffer, size_t length,
return 0;
}
-static int ni_usb_take_control(gpib_board_t *board, int synchronous)
+static int ni_usb_take_control(struct gpib_board *board, int synchronous)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x10;
static const int in_data_length = 0x10;
@@ -950,6 +941,9 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous)
int i = 0;
struct ni_usb_status_block status;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
return -ENOMEM;
@@ -968,15 +962,14 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous)
kfree(out_data);
if (retval || bytes_written != i) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
if (!in_data) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
return -ENOMEM;
}
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 1);
@@ -986,8 +979,8 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous)
if ((retval && retval != -ERESTARTSYS) || bytes_read != 12) {
if (retval == 0)
retval = -EIO;
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return retval;
}
@@ -997,11 +990,11 @@ static int ni_usb_take_control(gpib_board_t *board, int synchronous)
return retval;
}
-static int ni_usb_go_to_standby(gpib_board_t *board)
+static int ni_usb_go_to_standby(struct gpib_board *board)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x10;
static const int in_data_length = 0x20;
@@ -1009,6 +1002,9 @@ static int ni_usb_go_to_standby(gpib_board_t *board)
int i = 0;
struct ni_usb_status_block status;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
return -ENOMEM;
@@ -1025,15 +1021,14 @@ static int ni_usb_go_to_standby(gpib_board_t *board)
kfree(out_data);
if (retval || bytes_written != i) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
if (!in_data) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
return -ENOMEM;
}
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0);
@@ -1041,29 +1036,31 @@ static int ni_usb_go_to_standby(gpib_board_t *board)
mutex_unlock(&ni_priv->addressed_transfer_lock);
if (retval || bytes_read != 12) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return retval;
}
ni_usb_parse_status_block(in_data, &status);
kfree(in_data);
if (status.id != NIUSB_IBGTS_ID)
- dev_err(&usb_dev->dev, "%s: bug: status.id 0x%x != INUSB_IBGTS_ID\n",
- __func__, status.id);
+ dev_err(&usb_dev->dev, "bug: status.id 0x%x != INUSB_IBGTS_ID\n", status.id);
ni_usb_soft_update_status(board, status.ibsta, 0);
return 0;
}
-static void ni_usb_request_system_control(gpib_board_t *board, int request_control)
+static void ni_usb_request_system_control(struct gpib_board *board, int request_control)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[4];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
if (request_control) {
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = CMDR;
@@ -1093,7 +1090,7 @@ static void ni_usb_request_system_control(gpib_board_t *board, int request_contr
}
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return; // retval;
}
if (!request_control)
@@ -1103,11 +1100,11 @@ static void ni_usb_request_system_control(gpib_board_t *board, int request_contr
}
//FIXME maybe the interface should have a "pulse interface clear" function that can return an error?
-static void ni_usb_interface_clear(gpib_board_t *board, int assert)
+static void ni_usb_interface_clear(struct gpib_board *board, int assert)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x10;
static const int in_data_length = 0x10;
@@ -1115,14 +1112,15 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert)
int i = 0;
struct ni_usb_status_block status;
- // FIXME: we are going to pulse when assert is true, and ignore otherwise
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+// FIXME: we are going to pulse when assert is true, and ignore otherwise
if (assert == 0)
return;
out_data = kmalloc(out_data_length, GFP_KERNEL);
- if (!out_data) {
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
+ if (!out_data)
return;
- }
out_data[i++] = NIUSB_IBSIC_ID;
out_data[i++] = 0x0;
out_data[i++] = 0x0;
@@ -1131,8 +1129,8 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert)
retval = ni_usb_send_bulk_msg(ni_priv, out_data, i, &bytes_written, 1000);
kfree(out_data);
if (retval || bytes_written != i) {
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
@@ -1141,8 +1139,8 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert)
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0);
if (retval || bytes_read != 12) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return;
}
@@ -1151,14 +1149,17 @@ static void ni_usb_interface_clear(gpib_board_t *board, int assert)
ni_usb_soft_update_status(board, status.ibsta, 0);
}
-static void ni_usb_remote_enable(gpib_board_t *board, int enable)
+static void ni_usb_remote_enable(struct gpib_board *board, int enable)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
struct ni_usb_register reg;
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
reg.device = NIUSB_SUBDEV_TNT4882;
reg.address = nec7210_to_tnt4882_offset(AUXMR);
if (enable)
@@ -1167,7 +1168,7 @@ static void ni_usb_remote_enable(gpib_board_t *board, int enable)
reg.value = AUX_CREN;
retval = ni_usb_write_registers(ni_priv, &reg, 1, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return; //retval;
}
ni_priv->ren_state = enable;
@@ -1175,7 +1176,7 @@ static void ni_usb_remote_enable(gpib_board_t *board, int enable)
return;// 0;
}
-static int ni_usb_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int ni_usb_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct ni_usb_priv *ni_priv = board->private_data;
@@ -1188,7 +1189,7 @@ static int ni_usb_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_
return 0;
}
-static void ni_usb_disable_eos(gpib_board_t *board)
+static void ni_usb_disable_eos(struct gpib_board *board)
{
struct ni_usb_priv *ni_priv = board->private_data;
/* adapter gets unhappy if you don't zero all the bits
@@ -1198,16 +1199,18 @@ static void ni_usb_disable_eos(gpib_board_t *board)
ni_priv->eos_char = 0;
}
-static unsigned int ni_usb_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int ni_usb_update_status(struct gpib_board *board, unsigned int clear_mask)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
static const int buffer_length = 8;
u8 *buffer;
struct ni_usb_status_block status;
- //printk("%s: receive control pipe is %i\n", __func__, pipe);
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
buffer = kmalloc(buffer_length, GFP_KERNEL);
if (!buffer)
return board->status;
@@ -1216,7 +1219,7 @@ static unsigned int ni_usb_update_status(gpib_board_t *board, unsigned int clear
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x200, 0x0, buffer, buffer_length, 1000);
if (retval != buffer_length) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg returned %i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg returned %i\n", retval);
kfree(buffer);
return board->status;
}
@@ -1235,7 +1238,6 @@ static void ni_usb_stop(struct ni_usb_priv *ni_priv)
u8 *buffer;
struct ni_usb_status_block status;
- //printk("%s: receive control pipe is %i\n", __func__, pipe);
buffer = kmalloc(buffer_length, GFP_KERNEL);
if (!buffer)
return;
@@ -1244,7 +1246,7 @@ static void ni_usb_stop(struct ni_usb_priv *ni_priv)
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0, 0x0, buffer, buffer_length, 1000);
if (retval != buffer_length) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg returned %i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg returned %i\n", retval);
kfree(buffer);
return;
}
@@ -1252,15 +1254,18 @@ static void ni_usb_stop(struct ni_usb_priv *ni_priv)
kfree(buffer);
}
-static int ni_usb_primary_address(gpib_board_t *board, unsigned int address)
+static int ni_usb_primary_address(struct gpib_board *board, unsigned int address)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[2];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = nec7210_to_tnt4882_offset(ADR);
writes[i].value = address;
@@ -1271,7 +1276,7 @@ static int ni_usb_primary_address(gpib_board_t *board, unsigned int address)
i++;
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
@@ -1307,30 +1312,33 @@ static int ni_usb_write_sad(struct ni_usb_register *writes, int address, int ena
return i;
}
-static int ni_usb_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int ni_usb_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[3];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
i += ni_usb_write_sad(writes, address, enable);
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
return 0;
}
-static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int ni_usb_parallel_poll(struct gpib_board *board, uint8_t *result)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x10;
static const int in_data_length = 0x20;
@@ -1339,6 +1347,9 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
int j = 0;
struct ni_usb_status_block status;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
return -ENOMEM;
@@ -1353,8 +1364,8 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
kfree(out_data);
if (retval || bytes_written != i) {
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
@@ -1366,8 +1377,8 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
&bytes_read, 1000, 1);
if (retval && retval != -ERESTARTSYS) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return retval;
}
@@ -1378,37 +1389,43 @@ static int ni_usb_parallel_poll(gpib_board_t *board, uint8_t *result)
return retval;
}
-static void ni_usb_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void ni_usb_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[1];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = nec7210_to_tnt4882_offset(AUXMR);
writes[i].value = PPR | config;
i++;
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return;// retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
return;// 0;
}
-static void ni_usb_parallel_poll_response(gpib_board_t *board, int ist)
+static void ni_usb_parallel_poll_response(struct gpib_board *board, int ist)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[1];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = nec7210_to_tnt4882_offset(AUXMR);
if (ist)
@@ -1418,76 +1435,85 @@ static void ni_usb_parallel_poll_response(gpib_board_t *board, int ist)
i++;
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return;// retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
return;// 0;
}
-static void ni_usb_serial_poll_response(gpib_board_t *board, u8 status)
+static void ni_usb_serial_poll_response(struct gpib_board *board, u8 status)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[1];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = nec7210_to_tnt4882_offset(SPMR);
writes[i].value = status;
i++;
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return;// retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
return;// 0;
}
-static uint8_t ni_usb_serial_poll_status(gpib_board_t *board)
+static uint8_t ni_usb_serial_poll_status(struct gpib_board *board)
{
return 0;
}
-static void ni_usb_return_to_local(gpib_board_t *board)
+static void ni_usb_return_to_local(struct gpib_board *board)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
int i = 0;
struct ni_usb_register writes[1];
unsigned int ibsta;
+ if (!ni_priv->bus_interface)
+ return; // -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
writes[i].device = NIUSB_SUBDEV_TNT4882;
writes[i].address = nec7210_to_tnt4882_offset(AUXMR);
writes[i].value = AUX_RTL;
i++;
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return;// retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
return;// 0;
}
-static int ni_usb_line_status(const gpib_board_t *board)
+static int ni_usb_line_status(const struct gpib_board *board)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
u8 *out_data, *in_data;
static const int out_data_length = 0x20;
static const int in_data_length = 0x20;
int bytes_written = 0, bytes_read = 0;
int i = 0;
unsigned int bsr_bits;
- int line_status = ValidALL;
+ int line_status = VALID_ALL;
// NI windows driver reads 0xd(HSSEL), 0xc (ARD0), 0x1f (BSR)
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
out_data = kmalloc(out_data_length, GFP_KERNEL);
if (!out_data)
return -ENOMEM;
@@ -1509,15 +1535,14 @@ static int ni_usb_line_status(const gpib_board_t *board)
if (retval || bytes_written != i) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
if (retval != -EAGAIN)
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
- __func__, retval, bytes_written, i);
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%i\n",
+ retval, bytes_written, i);
return retval;
}
in_data = kmalloc(in_data_length, GFP_KERNEL);
if (!in_data) {
mutex_unlock(&ni_priv->addressed_transfer_lock);
- dev_err(&usb_dev->dev, "%s: kmalloc failed\n", __func__);
return -ENOMEM;
}
retval = ni_usb_nonblocking_receive_bulk_msg(ni_priv, in_data, in_data_length,
@@ -1527,8 +1552,8 @@ static int ni_usb_line_status(const gpib_board_t *board)
if (retval) {
if (retval != -EAGAIN)
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
kfree(in_data);
return retval;
}
@@ -1536,21 +1561,21 @@ static int ni_usb_line_status(const gpib_board_t *board)
ni_usb_parse_register_read_block(in_data, &bsr_bits, 1);
kfree(in_data);
if (bsr_bits & BCSR_REN_BIT)
- line_status |= BusREN;
+ line_status |= BUS_REN;
if (bsr_bits & BCSR_IFC_BIT)
- line_status |= BusIFC;
+ line_status |= BUS_IFC;
if (bsr_bits & BCSR_SRQ_BIT)
- line_status |= BusSRQ;
+ line_status |= BUS_SRQ;
if (bsr_bits & BCSR_EOI_BIT)
- line_status |= BusEOI;
+ line_status |= BUS_EOI;
if (bsr_bits & BCSR_NRFD_BIT)
- line_status |= BusNRFD;
+ line_status |= BUS_NRFD;
if (bsr_bits & BCSR_NDAC_BIT)
- line_status |= BusNDAC;
+ line_status |= BUS_NDAC;
if (bsr_bits & BCSR_DAV_BIT)
- line_status |= BusDAV;
+ line_status |= BUS_DAV;
if (bsr_bits & BCSR_ATN_BIT)
- line_status |= BusATN;
+ line_status |= BUS_ATN;
return line_status;
}
@@ -1591,28 +1616,31 @@ static int ni_usb_setup_t1_delay(struct ni_usb_register *reg, unsigned int nano_
return i;
}
-static unsigned int ni_usb_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int ni_usb_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
- struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
+ struct usb_device *usb_dev;
struct ni_usb_register writes[3];
unsigned int ibsta;
unsigned int actual_ns;
int i;
+ if (!ni_priv->bus_interface)
+ return -ENODEV;
+ usb_dev = interface_to_usbdev(ni_priv->bus_interface);
i = ni_usb_setup_t1_delay(writes, nano_sec, &actual_ns);
retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
- return -1; //FIXME should change return type to int for error reporting
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
+ return retval;
}
board->t1_nano_sec = actual_ns;
ni_usb_soft_update_status(board, ibsta, 0);
return actual_ns;
}
-static int ni_usb_allocate_private(gpib_board_t *board)
+static int ni_usb_allocate_private(struct gpib_board *board)
{
struct ni_usb_priv *ni_priv;
@@ -1635,7 +1663,7 @@ static void ni_usb_free_private(struct ni_usb_priv *ni_priv)
}
#define NUM_INIT_WRITES 26
-static int ni_usb_setup_init(gpib_board_t *board, struct ni_usb_register *writes)
+static int ni_usb_setup_init(struct gpib_board *board, struct ni_usb_register *writes)
{
struct ni_usb_priv *ni_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
@@ -1736,13 +1764,13 @@ static int ni_usb_setup_init(gpib_board_t *board, struct ni_usb_register *writes
writes[i].value = AUX_CPPF;
i++;
if (i > NUM_INIT_WRITES) {
- dev_err(&usb_dev->dev, "%s: bug!, buffer overrun, i=%i\n", __func__, i);
+ dev_err(&usb_dev->dev, "bug!, buffer overrun, i=%i\n", i);
return 0;
}
return i;
}
-static int ni_usb_init(gpib_board_t *board)
+static int ni_usb_init(struct gpib_board *board)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
@@ -1762,7 +1790,7 @@ static int ni_usb_init(gpib_board_t *board)
return -EFAULT;
kfree(writes);
if (retval) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return retval;
}
ni_usb_soft_update_status(board, ibsta, 0);
@@ -1771,16 +1799,13 @@ static int ni_usb_init(gpib_board_t *board)
static void ni_usb_interrupt_complete(struct urb *urb)
{
- gpib_board_t *board = urb->context;
+ struct gpib_board *board = urb->context;
struct ni_usb_priv *ni_priv = board->private_data;
struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
int retval;
struct ni_usb_status_block status;
unsigned long flags;
-// printk("debug: %s: status=0x%x, error_count=%i, actual_length=%i\n", __func__,
-// urb->status, urb->error_count, urb->actual_length);
-
switch (urb->status) {
/* success */
case 0:
@@ -1793,26 +1818,24 @@ static void ni_usb_interrupt_complete(struct urb *urb)
default: /* other error, resubmit */
retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_ATOMIC);
if (retval)
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__);
+ dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n");
return;
}
ni_usb_parse_status_block(urb->transfer_buffer, &status);
-// printk("debug: ibsta=0x%x\n", status.ibsta);
spin_lock_irqsave(&board->spinlock, flags);
ni_priv->monitored_ibsta_bits &= ~status.ibsta;
-// printk("debug: monitored_ibsta_bits=0x%x\n", ni_priv->monitored_ibsta_bits);
spin_unlock_irqrestore(&board->spinlock, flags);
wake_up_interruptible(&board->wait);
retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_ATOMIC);
if (retval)
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb\n", __func__);
+ dev_err(&usb_dev->dev, "failed to resubmit interrupt urb\n");
}
-static int ni_usb_set_interrupt_monitor(gpib_board_t *board, unsigned int monitored_bits)
+static int ni_usb_set_interrupt_monitor(struct gpib_board *board, unsigned int monitored_bits)
{
int retval;
struct ni_usb_priv *ni_priv = board->private_data;
@@ -1821,22 +1844,20 @@ static int ni_usb_set_interrupt_monitor(gpib_board_t *board, unsigned int monito
u8 *buffer;
struct ni_usb_status_block status;
unsigned long flags;
- //printk("%s: receive control pipe is %i\n", __func__, pipe);
+
buffer = kmalloc(buffer_length, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
spin_lock_irqsave(&board->spinlock, flags);
ni_priv->monitored_ibsta_bits = ni_usb_ibsta_monitor_mask & monitored_bits;
-// dev_err(&usb_dev->dev, "debug: %s: monitored_ibsta_bits=0x%x\n",
-// __func__, ni_priv->monitored_ibsta_bits);
spin_unlock_irqrestore(&board->spinlock, flags);
retval = ni_usb_receive_control_msg(ni_priv, NI_USB_WAIT_REQUEST, USB_DIR_IN |
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x300, ni_usb_ibsta_monitor_mask & monitored_bits,
buffer, buffer_length, 1000);
if (retval != buffer_length) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg returned %i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg returned %i\n", retval);
kfree(buffer);
return -1;
}
@@ -1845,7 +1866,7 @@ static int ni_usb_set_interrupt_monitor(gpib_board_t *board, unsigned int monito
return 0;
}
-static int ni_usb_setup_urbs(gpib_board_t *board)
+static int ni_usb_setup_urbs(struct gpib_board *board)
{
struct ni_usb_priv *ni_priv = board->private_data;
struct usb_device *usb_dev;
@@ -1872,8 +1893,7 @@ static int ni_usb_setup_urbs(gpib_board_t *board)
retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_KERNEL);
mutex_unlock(&ni_priv->interrupt_transfer_lock);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to submit first interrupt urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "failed to submit first interrupt urb, retval=%i\n", retval);
return retval;
}
return 0;
@@ -1904,7 +1924,6 @@ static int ni_usb_b_read_serial_number(struct ni_usb_priv *ni_priv)
int j;
unsigned int serial_number;
-// printk("%s: %s\n", __func__);
in_data = kmalloc(in_data_length, GFP_KERNEL);
if (!in_data)
return -ENOMEM;
@@ -1924,20 +1943,19 @@ static int ni_usb_b_read_serial_number(struct ni_usb_priv *ni_priv)
i += ni_usb_bulk_termination(&out_data[i]);
retval = ni_usb_send_bulk_msg(ni_priv, out_data, out_data_length, &bytes_written, 1000);
if (retval) {
- dev_err(&usb_dev->dev, "%s: ni_usb_send_bulk_msg returned %i, bytes_written=%i, i=%li\n",
- __func__,
+ dev_err(&usb_dev->dev, "send_bulk_msg returned %i, bytes_written=%i, i=%li\n",
retval, bytes_written, (long)out_data_length);
goto serial_out;
}
retval = ni_usb_receive_bulk_msg(ni_priv, in_data, in_data_length, &bytes_read, 1000, 0);
if (retval) {
- dev_err(&usb_dev->dev, "%s: ni_usb_receive_bulk_msg returned %i, bytes_read=%i\n",
- __func__, retval, bytes_read);
+ dev_err(&usb_dev->dev, "receive_bulk_msg returned %i, bytes_read=%i\n",
+ retval, bytes_read);
ni_usb_dump_raw_block(in_data, bytes_read);
goto serial_out;
}
if (ARRAY_SIZE(results) < num_reads) {
- dev_err(&usb_dev->dev, "Setup bug\n");
+ dev_err(&usb_dev->dev, "serial number eetup bug\n");
retval = -EINVAL;
goto serial_out;
}
@@ -1945,7 +1963,7 @@ static int ni_usb_b_read_serial_number(struct ni_usb_priv *ni_priv)
serial_number = 0;
for (j = 0; j < num_reads; ++j)
serial_number |= (results[j] & 0xff) << (8 * j);
- dev_info(&usb_dev->dev, "%s: board serial number is 0x%x\n", __func__, serial_number);
+ dev_dbg(&usb_dev->dev, "board serial number is 0x%x\n", serial_number);
retval = 0;
serial_out:
kfree(in_data);
@@ -1973,22 +1991,22 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0, 0x0, buffer, buffer_size, 1000);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n",
- __func__, NI_USB_SERIAL_NUMBER_REQUEST, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n",
+ NI_USB_SERIAL_NUMBER_REQUEST, retval);
goto ready_out;
}
j = 0;
if (buffer[j] != NI_USB_SERIAL_NUMBER_REQUEST) {
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
- __func__, j, (int)buffer[j], NI_USB_SERIAL_NUMBER_REQUEST);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
+ j, (int)buffer[j], NI_USB_SERIAL_NUMBER_REQUEST);
unexpected = 1;
}
if (unexpected)
ni_usb_dump_raw_block(buffer, retval);
// NI-USB-HS+ pads the serial with 0x0 to make 16 bytes
if (retval != 5 && retval != 16) {
- dev_err(&usb_dev->dev, "%s: received unexpected number of bytes = %i, expected 5 or 16\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "received unexpected number of bytes = %i, expected 5 or 16\n",
+ retval);
ni_usb_dump_raw_block(buffer, retval);
}
serial_number = 0;
@@ -1996,7 +2014,7 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
serial_number |= (buffer[++j] << 8);
serial_number |= (buffer[++j] << 16);
serial_number |= (buffer[++j] << 24);
- dev_info(&usb_dev->dev, "%s: board serial number is 0x%x\n", __func__, serial_number);
+ dev_dbg(&usb_dev->dev, "board serial number is 0x%x\n", serial_number);
for (i = 0; i < timeout; ++i) {
int ready = 0;
@@ -2004,26 +2022,26 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0, 0x0, buffer, buffer_size, 100);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n",
- __func__, NI_USB_POLL_READY_REQUEST, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n",
+ NI_USB_POLL_READY_REQUEST, retval);
goto ready_out;
}
j = 0;
unexpected = 0;
if (buffer[j] != NI_USB_POLL_READY_REQUEST) { // [0]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
- __func__, j, (int)buffer[j], NI_USB_POLL_READY_REQUEST);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
+ j, (int)buffer[j], NI_USB_POLL_READY_REQUEST);
unexpected = 1;
}
++j;
if (buffer[j] != 0x1 && buffer[j] != 0x0) { // [1] HS+ sends 0x0
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
if (buffer[++j] != 0x0) { // [2]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
- __func__, j, (int)buffer[j], 0x0);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x%x\n",
+ j, (int)buffer[j], 0x0);
unexpected = 1;
}
++j;
@@ -2031,22 +2049,22 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
// NI-USB-HS+ sends 0x0
if (buffer[j] != 0x1 && buffer[j] != 0x8 && buffer[j] != 0x7 && buffer[j] != 0x0) {
// [3]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0, 0x1, 0x7 or 0x8\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x0, 0x1, 0x7 or 0x8\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
++j;
// NI-USB-HS+ sends 0 here
if (buffer[j] != 0x30 && buffer[j] != 0x0) { // [4]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x30\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x30\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
++j;
// MC usb-488 (and sometimes NI-USB-HS?) and NI-USB-HS+ sends 0x0 here
if (buffer[j] != 0x1 && buffer[j] != 0x0) { // [5]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x1 or 0x0\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
if (buffer[++j] != 0x0) { // [6]
@@ -2054,8 +2072,8 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
// NI-USB-HS+ sends 0xf here
if (buffer[j] != 0x2 && buffer[j] != 0xe && buffer[j] != 0xf &&
buffer[j] != 0x16) {
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x2, 0xe, 0xf or 0x16\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x2, 0xe, 0xf or 0x16\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
}
@@ -2064,30 +2082,30 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
// MC usb-488 sends 0x5 here; MC usb-488A sends 0x6 here
if (buffer[j] != 0x3 && buffer[j] != 0x5 && buffer[j] != 0x6 &&
buffer[j] != 0x8) {
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x3 or 0x5, 0x6 or 0x08\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x3 or 0x5, 0x6 or 0x08\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
}
++j;
if (buffer[j] != 0x0 && buffer[j] != 0x2) { // [8] MC usb-488 sends 0x2 here
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x2\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, " unexpected data: buffer[%i]=0x%x, expected 0x0 or 0x2\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
++j;
// MC usb-488A and NI-USB-HS sends 0x3 here; NI-USB-HS+ sends 0x30 here
if (buffer[j] != 0x0 && buffer[j] != 0x3 && buffer[j] != 0x30) { // [9]
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x0, 0x3 or 0x30\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x0, 0x3 or 0x30\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
if (buffer[++j] != 0x0) {
ready = 1;
if (buffer[j] != 0x96 && buffer[j] != 0x7 && buffer[j] != 0x6e) {
// [10] MC usb-488 sends 0x7 here
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[%i]=0x%x, expected 0x96, 0x07 or 0x6e\n",
- __func__, j, (int)buffer[j]);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[%i]=0x%x, expected 0x96, 0x07 or 0x6e\n",
+ j, (int)buffer[j]);
unexpected = 1;
}
}
@@ -2097,7 +2115,6 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
break;
retval = msleep_interruptible(msec_sleep_duration);
if (retval) {
- dev_err(&usb_dev->dev, "ni_usb_gpib: msleep interrupted\n");
retval = -ERESTARTSYS;
goto ready_out;
}
@@ -2106,7 +2123,7 @@ static int ni_usb_hs_wait_for_ready(struct ni_usb_priv *ni_priv)
ready_out:
kfree(buffer);
- dev_dbg(&usb_dev->dev, "%s: exit retval=%d\n", __func__, retval);
+ dev_dbg(&usb_dev->dev, "exit retval=%d\n", retval);
return retval;
}
@@ -2134,14 +2151,14 @@ static int ni_usb_hs_plus_extra_init(struct ni_usb_priv *ni_priv)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0, 0x0, buffer, transfer_size, 1000);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n",
- __func__, NI_USB_HS_PLUS_0x48_REQUEST, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n",
+ NI_USB_HS_PLUS_0x48_REQUEST, retval);
break;
}
// expected response data: 48 f3 30 00 00 00 00 00 00 00 00 00 00 00 00 00
if (buffer[0] != NI_USB_HS_PLUS_0x48_REQUEST)
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[0]=0x%x, expected 0x%x\n",
- __func__, (int)buffer[0], NI_USB_HS_PLUS_0x48_REQUEST);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[0]=0x%x, expected 0x%x\n",
+ (int)buffer[0], NI_USB_HS_PLUS_0x48_REQUEST);
transfer_size = 2;
@@ -2149,14 +2166,14 @@ static int ni_usb_hs_plus_extra_init(struct ni_usb_priv *ni_priv)
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x1, 0x0, buffer, transfer_size, 1000);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n",
- __func__, NI_USB_HS_PLUS_LED_REQUEST, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n",
+ NI_USB_HS_PLUS_LED_REQUEST, retval);
break;
}
// expected response data: 4b 00
if (buffer[0] != NI_USB_HS_PLUS_LED_REQUEST)
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[0]=0x%x, expected 0x%x\n",
- __func__, (int)buffer[0], NI_USB_HS_PLUS_LED_REQUEST);
+ dev_err(&usb_dev->dev, "unexpected data: buffer[0]=0x%x, expected 0x%x\n",
+ (int)buffer[0], NI_USB_HS_PLUS_LED_REQUEST);
transfer_size = 9;
@@ -2165,15 +2182,14 @@ static int ni_usb_hs_plus_extra_init(struct ni_usb_priv *ni_priv)
USB_RECIP_INTERFACE,
0x0, 0x1, buffer, transfer_size, 1000);
if (retval < 0) {
- dev_err(&usb_dev->dev, "%s: usb_control_msg request 0x%x returned %i\n",
- __func__, NI_USB_HS_PLUS_0xf8_REQUEST, retval);
+ dev_err(&usb_dev->dev, "usb_control_msg request 0x%x returned %i\n",
+ NI_USB_HS_PLUS_0xf8_REQUEST, retval);
break;
}
// expected response data: f8 01 00 00 00 01 00 00 00
if (buffer[0] != NI_USB_HS_PLUS_0xf8_REQUEST)
- dev_err(&usb_dev->dev, "%s: unexpected data: buffer[0]=0x%x, expected 0x%x\n",
- __func__, (int)buffer[0], NI_USB_HS_PLUS_0xf8_REQUEST);
-
+ dev_err(&usb_dev->dev, "unexpected data: buffer[0]=0x%x, expected 0x%x\n",
+ (int)buffer[0], NI_USB_HS_PLUS_0xf8_REQUEST);
} while (0);
// cleanup
@@ -2189,10 +2205,10 @@ static inline int ni_usb_device_match(struct usb_interface *interface,
return 1;
}
-static int ni_usb_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_usb_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
int retval;
- int i;
+ int i, index;
struct ni_usb_priv *ni_priv;
int product_id;
struct usb_device *usb_dev;
@@ -2211,19 +2227,17 @@ static int ni_usb_attach(gpib_board_t *board, const gpib_board_config_t *config)
ni_priv->bus_interface = ni_usb_driver_interfaces[i];
usb_set_intfdata(ni_usb_driver_interfaces[i], board);
usb_dev = interface_to_usbdev(ni_priv->bus_interface);
- dev_info(&usb_dev->dev,
- "bus %d dev num %d attached to gpib minor %d, NI usb interface %i\n",
- usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
+ index = i;
break;
}
}
if (i == MAX_NUM_NI_USB_INTERFACES) {
mutex_unlock(&ni_usb_hotplug_lock);
- pr_err("No supported NI usb gpib adapters found, have you loaded its firmware?\n");
+ dev_err(board->gpib_dev, "No supported adapters found, have you loaded its firmware?\n");
return -ENODEV;
}
if (usb_reset_configuration(interface_to_usbdev(ni_priv->bus_interface)))
- dev_err(&usb_dev->dev, "ni_usb_gpib: usb_reset_configuration() failed.\n");
+ dev_err(&usb_dev->dev, "usb_reset_configuration() failed.\n");
product_id = le16_to_cpu(usb_dev->descriptor.idProduct);
ni_priv->product_id = product_id;
@@ -2296,7 +2310,9 @@ static int ni_usb_attach(gpib_board_t *board, const gpib_board_config_t *config)
}
mutex_unlock(&ni_usb_hotplug_lock);
- dev_info(&usb_dev->dev, "%s: attached\n", __func__);
+ dev_info(&usb_dev->dev,
+ "bus %d dev num %d attached to gpib%d, intf %i\n",
+ usb_dev->bus->busnum, usb_dev->devnum, board->minor, index);
return retval;
}
@@ -2304,33 +2320,25 @@ static int ni_usb_shutdown_hardware(struct ni_usb_priv *ni_priv)
{
struct usb_device *usb_dev = interface_to_usbdev(ni_priv->bus_interface);
int retval;
- int i = 0;
struct ni_usb_register writes[2];
static const int writes_length = ARRAY_SIZE(writes);
unsigned int ibsta;
-// printk("%s: %s\n", __func__);
- writes[i].device = NIUSB_SUBDEV_TNT4882;
- writes[i].address = nec7210_to_tnt4882_offset(AUXMR);
- writes[i].value = AUX_CR;
- i++;
- writes[i].device = NIUSB_SUBDEV_UNKNOWN3;
- writes[i].address = 0x10;
- writes[i].value = 0x0;
- i++;
- if (i > writes_length) {
- dev_err(&usb_dev->dev, "%s: bug!, buffer overrun, i=%i\n", __func__, i);
- return -EINVAL;
- }
- retval = ni_usb_write_registers(ni_priv, writes, i, &ibsta);
+ writes[0].device = NIUSB_SUBDEV_TNT4882;
+ writes[0].address = nec7210_to_tnt4882_offset(AUXMR);
+ writes[0].value = AUX_CR;
+ writes[1].device = NIUSB_SUBDEV_UNKNOWN3;
+ writes[1].address = 0x10;
+ writes[1].value = 0x0;
+ retval = ni_usb_write_registers(ni_priv, writes, writes_length, &ibsta);
if (retval) {
- dev_err(&usb_dev->dev, "%s: register write failed, retval=%i\n", __func__, retval);
+ dev_err(&usb_dev->dev, "register write failed, retval=%i\n", retval);
return retval;
}
return 0;
}
-static void ni_usb_detach(gpib_board_t *board)
+static void ni_usb_detach(struct gpib_board *board)
{
struct ni_usb_priv *ni_priv;
@@ -2413,7 +2421,7 @@ static int ni_usb_driver_probe(struct usb_interface *interface, const struct usb
if (i == MAX_NUM_NI_USB_INTERFACES) {
usb_put_dev(usb_dev);
mutex_unlock(&ni_usb_hotplug_lock);
- dev_err(&usb_dev->dev, "%s: ni_usb_driver_interfaces[] full\n", __func__);
+ dev_err(&usb_dev->dev, "ni_usb_driver_interfaces[] full\n");
return -1;
}
path = kmalloc(path_length, GFP_KERNEL);
@@ -2423,7 +2431,7 @@ static int ni_usb_driver_probe(struct usb_interface *interface, const struct usb
return -ENOMEM;
}
usb_make_path(usb_dev, path, path_length);
- dev_info(&usb_dev->dev, "ni_usb_gpib: probe succeeded for path: %s\n", path);
+ dev_info(&usb_dev->dev, "probe succeeded for path: %s\n", path);
kfree(path);
mutex_unlock(&ni_usb_hotplug_lock);
return 0;
@@ -2437,7 +2445,7 @@ static void ni_usb_driver_disconnect(struct usb_interface *interface)
mutex_lock(&ni_usb_hotplug_lock);
for (i = 0; i < MAX_NUM_NI_USB_INTERFACES; i++) {
if (ni_usb_driver_interfaces[i] == interface) {
- gpib_board_t *board = usb_get_intfdata(interface);
+ struct gpib_board *board = usb_get_intfdata(interface);
if (board) {
struct ni_usb_priv *ni_priv = board->private_data;
@@ -2458,8 +2466,7 @@ static void ni_usb_driver_disconnect(struct usb_interface *interface)
}
}
if (i == MAX_NUM_NI_USB_INTERFACES)
- dev_err(&usb_dev->dev, "%s: unable to find interface in ni_usb_driver_interfaces[]? bug?\n",
- __func__);
+ dev_err(&usb_dev->dev, "unable to find interface bug?\n");
usb_put_dev(usb_dev);
mutex_unlock(&ni_usb_hotplug_lock);
}
@@ -2467,7 +2474,7 @@ static void ni_usb_driver_disconnect(struct usb_interface *interface)
static int ni_usb_driver_suspend(struct usb_interface *interface, pm_message_t message)
{
struct usb_device *usb_dev = interface_to_usbdev(interface);
- gpib_board_t *board;
+ struct gpib_board *board;
int i, retval;
mutex_lock(&ni_usb_hotplug_lock);
@@ -2498,9 +2505,9 @@ static int ni_usb_driver_suspend(struct usb_interface *interface, pm_message_t m
ni_usb_cleanup_urbs(ni_priv);
mutex_unlock(&ni_priv->interrupt_transfer_lock);
}
- dev_info(&usb_dev->dev,
- "bus %d dev num %d gpib minor %d, ni usb interface %i suspended\n",
- usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
+ dev_dbg(&usb_dev->dev,
+ "bus %d dev num %d gpib%d, interface %i suspended\n",
+ usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
}
mutex_unlock(&ni_usb_hotplug_lock);
@@ -2511,7 +2518,7 @@ static int ni_usb_driver_resume(struct usb_interface *interface)
{
struct usb_device *usb_dev = interface_to_usbdev(interface);
- gpib_board_t *board;
+ struct gpib_board *board;
int i, retval;
mutex_lock(&ni_usb_hotplug_lock);
@@ -2535,15 +2542,15 @@ static int ni_usb_driver_resume(struct usb_interface *interface)
mutex_lock(&ni_priv->interrupt_transfer_lock);
retval = usb_submit_urb(ni_priv->interrupt_urb, GFP_KERNEL);
if (retval) {
- dev_err(&usb_dev->dev, "%s: failed to resubmit interrupt urb, retval=%i\n",
- __func__, retval);
+ dev_err(&usb_dev->dev, "resume failed to resubmit interrupt urb, retval=%i\n",
+ retval);
mutex_unlock(&ni_priv->interrupt_transfer_lock);
mutex_unlock(&ni_usb_hotplug_lock);
return retval;
}
mutex_unlock(&ni_priv->interrupt_transfer_lock);
} else {
- dev_err(&usb_dev->dev, "%s: bug! int urb not set up\n", __func__);
+ dev_err(&usb_dev->dev, "bug! resume int urb not set up\n");
mutex_unlock(&ni_usb_hotplug_lock);
return -EINVAL;
}
@@ -2600,9 +2607,9 @@ static int ni_usb_driver_resume(struct usb_interface *interface)
if (ni_priv->ren_state)
ni_usb_remote_enable(board, 1);
- dev_info(&usb_dev->dev,
- "bus %d dev num %d gpib minor %d, ni usb interface %i resumed\n",
- usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
+ dev_dbg(&usb_dev->dev,
+ "bus %d dev num %d gpib%d, interface %i resumed\n",
+ usb_dev->bus->busnum, usb_dev->devnum, board->minor, i);
}
mutex_unlock(&ni_usb_hotplug_lock);
@@ -2610,7 +2617,7 @@ static int ni_usb_driver_resume(struct usb_interface *interface)
}
static struct usb_driver ni_usb_bus_driver = {
- .name = "ni_usb_gpib",
+ .name = DRV_NAME,
.probe = ni_usb_driver_probe,
.disconnect = ni_usb_driver_disconnect,
.suspend = ni_usb_driver_suspend,
@@ -2623,19 +2630,18 @@ static int __init ni_usb_init_module(void)
int i;
int ret;
- pr_info("ni_usb_gpib driver loading\n");
for (i = 0; i < MAX_NUM_NI_USB_INTERFACES; i++)
ni_usb_driver_interfaces[i] = NULL;
ret = usb_register(&ni_usb_bus_driver);
if (ret) {
- pr_err("ni_usb_gpib: usb_register failed: error = %d\n", ret);
+ pr_err("usb_register failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&ni_usb_gpib_interface, THIS_MODULE);
if (ret) {
- pr_err("ni_usb_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
return ret;
}
@@ -2644,7 +2650,6 @@ static int __init ni_usb_init_module(void)
static void __exit ni_usb_exit_module(void)
{
- pr_info("ni_usb_gpib driver unloading\n");
gpib_unregister_driver(&ni_usb_gpib_interface);
usb_deregister(&ni_usb_bus_driver);
}
diff --git a/drivers/staging/gpib/pc2/pc2_gpib.c b/drivers/staging/gpib/pc2/pc2_gpib.c
index c0b07cb63d9a..96d3c09f2273 100644
--- a/drivers/staging/gpib/pc2/pc2_gpib.c
+++ b/drivers/staging/gpib/pc2/pc2_gpib.c
@@ -4,6 +4,9 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -49,22 +52,13 @@ static inline unsigned int CLEAR_INTR_REG(unsigned int irq)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for PC2/PC2a and compatible devices");
-static int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int pc2a_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int pc2a_cb7210_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int pc2_2a_attach(gpib_board_t *board, const gpib_board_config_t *config);
-
-static void pc2_detach(gpib_board_t *board);
-static void pc2a_detach(gpib_board_t *board);
-static void pc2_2a_detach(gpib_board_t *board);
-
/*
* GPIB interrupt service routines
*/
irqreturn_t pc2_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct pc2_priv *priv = board->private_data;
unsigned long flags;
irqreturn_t retval;
@@ -77,7 +71,7 @@ irqreturn_t pc2_interrupt(int irq, void *arg)
irqreturn_t pc2a_interrupt(int irq, void *arg)
{
- gpib_board_t *board = arg;
+ struct gpib_board *board = arg;
struct pc2_priv *priv = board->private_data;
int status1, status2;
unsigned long flags;
@@ -96,7 +90,7 @@ irqreturn_t pc2a_interrupt(int irq, void *arg)
}
// wrappers for interface functions
-static int pc2_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
+static int pc2_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
size_t *bytes_read)
{
struct pc2_priv *priv = board->private_data;
@@ -104,7 +98,7 @@ static int pc2_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *en
return nec7210_read(board, &priv->nec7210_priv, buffer, length, end, bytes_read);
}
-static int pc2_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
+static int pc2_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
size_t *bytes_written)
{
struct pc2_priv *priv = board->private_data;
@@ -112,245 +106,133 @@ static int pc2_write(gpib_board_t *board, uint8_t *buffer, size_t length, int se
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-static int pc2_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int pc2_command(struct gpib_board *board, uint8_t *buffer, size_t length, size_t *bytes_written)
{
struct pc2_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-static int pc2_take_control(gpib_board_t *board, int synchronous)
+static int pc2_take_control(struct gpib_board *board, int synchronous)
{
struct pc2_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-static int pc2_go_to_standby(gpib_board_t *board)
+static int pc2_go_to_standby(struct gpib_board *board)
{
struct pc2_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-static void pc2_request_system_control(gpib_board_t *board, int request_control)
+static void pc2_request_system_control(struct gpib_board *board, int request_control)
{
struct pc2_priv *priv = board->private_data;
nec7210_request_system_control(board, &priv->nec7210_priv, request_control);
}
-static void pc2_interface_clear(gpib_board_t *board, int assert)
+static void pc2_interface_clear(struct gpib_board *board, int assert)
{
struct pc2_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-static void pc2_remote_enable(gpib_board_t *board, int enable)
+static void pc2_remote_enable(struct gpib_board *board, int enable)
{
struct pc2_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-static int pc2_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int pc2_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct pc2_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-static void pc2_disable_eos(gpib_board_t *board)
+static void pc2_disable_eos(struct gpib_board *board)
{
struct pc2_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-static unsigned int pc2_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int pc2_update_status(struct gpib_board *board, unsigned int clear_mask)
{
struct pc2_priv *priv = board->private_data;
return nec7210_update_status(board, &priv->nec7210_priv, clear_mask);
}
-static int pc2_primary_address(gpib_board_t *board, unsigned int address)
+static int pc2_primary_address(struct gpib_board *board, unsigned int address)
{
struct pc2_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-static int pc2_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int pc2_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct pc2_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-static int pc2_parallel_poll(gpib_board_t *board, uint8_t *result)
+static int pc2_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct pc2_priv *priv = board->private_data;
return nec7210_parallel_poll(board, &priv->nec7210_priv, result);
}
-static void pc2_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void pc2_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct pc2_priv *priv = board->private_data;
nec7210_parallel_poll_configure(board, &priv->nec7210_priv, config);
}
-static void pc2_parallel_poll_response(gpib_board_t *board, int ist)
+static void pc2_parallel_poll_response(struct gpib_board *board, int ist)
{
struct pc2_priv *priv = board->private_data;
nec7210_parallel_poll_response(board, &priv->nec7210_priv, ist);
}
-static void pc2_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void pc2_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct pc2_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static uint8_t pc2_serial_poll_status(gpib_board_t *board)
+static uint8_t pc2_serial_poll_status(struct gpib_board *board)
{
struct pc2_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-static unsigned int pc2_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int pc2_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct pc2_priv *priv = board->private_data;
return nec7210_t1_delay(board, &priv->nec7210_priv, nano_sec);
}
-static void pc2_return_to_local(gpib_board_t *board)
+static void pc2_return_to_local(struct gpib_board *board)
{
struct pc2_priv *priv = board->private_data;
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-static gpib_interface_t pc2_interface = {
- .name = "pcII",
- .attach = pc2_attach,
- .detach = pc2_detach,
- .read = pc2_read,
- .write = pc2_write,
- .command = pc2_command,
- .take_control = pc2_take_control,
- .go_to_standby = pc2_go_to_standby,
- .request_system_control = pc2_request_system_control,
- .interface_clear = pc2_interface_clear,
- .remote_enable = pc2_remote_enable,
- .enable_eos = pc2_enable_eos,
- .disable_eos = pc2_disable_eos,
- .parallel_poll = pc2_parallel_poll,
- .parallel_poll_configure = pc2_parallel_poll_configure,
- .parallel_poll_response = pc2_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL,
- .update_status = pc2_update_status,
- .primary_address = pc2_primary_address,
- .secondary_address = pc2_secondary_address,
- .serial_poll_response = pc2_serial_poll_response,
- .serial_poll_status = pc2_serial_poll_status,
- .t1_delay = pc2_t1_delay,
- .return_to_local = pc2_return_to_local,
-};
-
-static gpib_interface_t pc2a_interface = {
- .name = "pcIIa",
- .attach = pc2a_attach,
- .detach = pc2a_detach,
- .read = pc2_read,
- .write = pc2_write,
- .command = pc2_command,
- .take_control = pc2_take_control,
- .go_to_standby = pc2_go_to_standby,
- .request_system_control = pc2_request_system_control,
- .interface_clear = pc2_interface_clear,
- .remote_enable = pc2_remote_enable,
- .enable_eos = pc2_enable_eos,
- .disable_eos = pc2_disable_eos,
- .parallel_poll = pc2_parallel_poll,
- .parallel_poll_configure = pc2_parallel_poll_configure,
- .parallel_poll_response = pc2_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL,
- .update_status = pc2_update_status,
- .primary_address = pc2_primary_address,
- .secondary_address = pc2_secondary_address,
- .serial_poll_response = pc2_serial_poll_response,
- .serial_poll_status = pc2_serial_poll_status,
- .t1_delay = pc2_t1_delay,
- .return_to_local = pc2_return_to_local,
-};
-
-static gpib_interface_t pc2a_cb7210_interface = {
- .name = "pcIIa_cb7210",
- .attach = pc2a_cb7210_attach,
- .detach = pc2a_detach,
- .read = pc2_read,
- .write = pc2_write,
- .command = pc2_command,
- .take_control = pc2_take_control,
- .go_to_standby = pc2_go_to_standby,
- .request_system_control = pc2_request_system_control,
- .interface_clear = pc2_interface_clear,
- .remote_enable = pc2_remote_enable,
- .enable_eos = pc2_enable_eos,
- .disable_eos = pc2_disable_eos,
- .parallel_poll = pc2_parallel_poll,
- .parallel_poll_configure = pc2_parallel_poll_configure,
- .parallel_poll_response = pc2_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL, //XXX
- .update_status = pc2_update_status,
- .primary_address = pc2_primary_address,
- .secondary_address = pc2_secondary_address,
- .serial_poll_response = pc2_serial_poll_response,
- .serial_poll_status = pc2_serial_poll_status,
- .t1_delay = pc2_t1_delay,
- .return_to_local = pc2_return_to_local,
-};
-
-static gpib_interface_t pc2_2a_interface = {
- .name = "pcII_IIa",
- .attach = pc2_2a_attach,
- .detach = pc2_2a_detach,
- .read = pc2_read,
- .write = pc2_write,
- .command = pc2_command,
- .take_control = pc2_take_control,
- .go_to_standby = pc2_go_to_standby,
- .request_system_control = pc2_request_system_control,
- .interface_clear = pc2_interface_clear,
- .remote_enable = pc2_remote_enable,
- .enable_eos = pc2_enable_eos,
- .disable_eos = pc2_disable_eos,
- .parallel_poll = pc2_parallel_poll,
- .parallel_poll_configure = pc2_parallel_poll_configure,
- .parallel_poll_response = pc2_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL,
- .update_status = pc2_update_status,
- .primary_address = pc2_primary_address,
- .secondary_address = pc2_secondary_address,
- .serial_poll_response = pc2_serial_poll_response,
- .serial_poll_status = pc2_serial_poll_status,
- .t1_delay = pc2_t1_delay,
- .return_to_local = pc2_return_to_local,
-};
-
-static int allocate_private(gpib_board_t *board)
+static int allocate_private(struct gpib_board *board)
{
struct pc2_priv *priv;
@@ -363,13 +245,13 @@ static int allocate_private(gpib_board_t *board)
return 0;
}
-static void free_private(gpib_board_t *board)
+static void free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-static int pc2_generic_attach(gpib_board_t *board, const gpib_board_config_t *config,
+static int pc2_generic_attach(struct gpib_board *board, const gpib_board_config_t *config,
enum nec7210_chipset chipset)
{
struct pc2_priv *pc2_priv;
@@ -389,7 +271,8 @@ static int pc2_generic_attach(gpib_board_t *board, const gpib_board_config_t *co
* is adapted to use isa_register_driver.
*/
if (config->ibdma)
- pr_err("DMA disabled for pc2 gpib, driver needs to be adapted to use isa_register_driver to get a struct device*");
+ // driver needs to be adapted to use isa_register_driver to get a struct device*
+ dev_err(board->gpib_dev, "DMA disabled for pc2 gpib");
#else
if (config->ibdma) {
nec_priv->dma_buffer_length = 0x1000;
@@ -401,7 +284,7 @@ static int pc2_generic_attach(gpib_board_t *board, const gpib_board_config_t *co
// request isa dma channel
if (request_dma(config->ibdma, "pc2")) {
- pr_err("gpib: can't request DMA %d\n", config->ibdma);
+ dev_err(board->gpib_dev, "can't request DMA %d\n", config->ibdma);
return -1;
}
nec_priv->dma_channel = config->ibdma;
@@ -411,7 +294,7 @@ static int pc2_generic_attach(gpib_board_t *board, const gpib_board_config_t *co
return 0;
}
-int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int pc2_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
int isr_flags = 0;
struct pc2_priv *pc2_priv;
@@ -427,8 +310,8 @@ int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv->offset = pc2_reg_offset;
if (!request_region(config->ibbase, pc2_iosize, "pc2")) {
- pr_err("gpib: ioports are already in use\n");
- return -1;
+ dev_err(board->gpib_dev, "ioports are already in use\n");
+ return -EBUSY;
}
nec_priv->iobase = config->ibbase;
@@ -437,14 +320,14 @@ int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
// install interrupt handler
if (config->ibirq) {
if (request_irq(config->ibirq, pc2_interrupt, isr_flags, "pc2", board)) {
- pr_err("gpib: can't request IRQ %d\n", config->ibirq);
- return -1;
+ dev_err(board->gpib_dev, "can't request IRQ %d\n", config->ibirq);
+ return -EBUSY;
}
}
pc2_priv->irq = config->ibirq;
/* poll so we can detect assertion of ATN */
if (gpib_request_pseudo_irq(board, pc2_interrupt)) {
- pr_err("pc2_gpib: failed to allocate pseudo_irq\n");
+ dev_err(board->gpib_dev, "failed to allocate pseudo_irq\n");
return -1;
}
/* set internal counter register for 8 MHz input clock */
@@ -455,7 +338,7 @@ int pc2_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void pc2_detach(gpib_board_t *board)
+static void pc2_detach(struct gpib_board *board)
{
struct pc2_priv *pc2_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -482,7 +365,7 @@ void pc2_detach(gpib_board_t *board)
free_private(board);
}
-static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *config,
+static int pc2a_common_attach(struct gpib_board *board, const gpib_board_config_t *config,
unsigned int num_registers, enum nec7210_chipset chipset)
{
unsigned int i, j;
@@ -505,18 +388,19 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
case 0x62e1:
break;
default:
- pr_err("PCIIa base range invalid, must be one of 0x[0246]2e1, but is 0x%d\n",
- config->ibbase);
+ dev_err(board->gpib_dev, "PCIIa base range invalid, must be one of 0x[0246]2e1, but is 0x%x\n",
+ config->ibbase);
return -1;
}
if (config->ibirq) {
if (config->ibirq < 2 || config->ibirq > 7) {
- pr_err("pc2_gpib: illegal interrupt level %i\n", config->ibirq);
+ dev_err(board->gpib_dev, "illegal interrupt level %i\n",
+ config->ibirq);
return -1;
}
} else {
- pr_err("pc2_gpib: interrupt disabled, using polling mode (slow)\n");
+ dev_err(board->gpib_dev, "interrupt disabled, using polling mode (slow)\n");
}
#ifdef CHECK_IOPORTS
unsigned int err = 0;
@@ -528,36 +412,36 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
if (config->ibirq && check_region(pc2a_clear_intr_iobase + config->ibirq, 1))
err++;
if (err) {
- pr_err("gpib: ioports are already in use");
- return -1;
+ dev_err(board->gpib_dev, "ioports are already in use");
+ return -EBUSY;
}
#endif
for (i = 0; i < num_registers; i++) {
if (!request_region(config->ibbase +
i * pc2a_reg_offset, 1, "pc2a")) {
- pr_err("gpib: ioports are already in use");
+ dev_err(board->gpib_dev, "ioports are already in use");
for (j = 0; j < i; j++)
release_region(config->ibbase +
j * pc2a_reg_offset, 1);
- return -1;
+ return -EBUSY;
}
}
nec_priv->iobase = config->ibbase;
if (config->ibirq) {
if (!request_region(pc2a_clear_intr_iobase + config->ibirq, 1, "pc2a")) {
- pr_err("gpib: ioports are already in use");
+ dev_err(board->gpib_dev, "ioports are already in use");
return -1;
}
pc2_priv->clear_intr_addr = pc2a_clear_intr_iobase + config->ibirq;
if (request_irq(config->ibirq, pc2a_interrupt, 0, "pc2a", board)) {
- pr_err("gpib: can't request IRQ %d\n", config->ibirq);
- return -1;
+ dev_err(board->gpib_dev, "can't request IRQ %d\n", config->ibirq);
+ return -EBUSY;
}
}
pc2_priv->irq = config->ibirq;
/* poll so we can detect assertion of ATN */
if (gpib_request_pseudo_irq(board, pc2_interrupt)) {
- pr_err("pc2_gpib: failed to allocate pseudo_irq\n");
+ dev_err(board->gpib_dev, "failed to allocate pseudo_irq\n");
return -1;
}
@@ -575,22 +459,22 @@ static int pc2a_common_attach(gpib_board_t *board, const gpib_board_config_t *co
return 0;
}
-int pc2a_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int pc2a_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return pc2a_common_attach(board, config, pc2a_iosize, NEC7210);
}
-int pc2a_cb7210_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int pc2a_cb7210_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return pc2a_common_attach(board, config, pc2a_iosize, CB7210);
}
-int pc2_2a_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int pc2_2a_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return pc2a_common_attach(board, config, pc2_2a_iosize, NAT4882);
}
-static void pc2a_common_detach(gpib_board_t *board, unsigned int num_registers)
+static void pc2a_common_detach(struct gpib_board *board, unsigned int num_registers)
{
int i;
struct pc2_priv *pc2_priv = board->private_data;
@@ -623,41 +507,153 @@ static void pc2a_common_detach(gpib_board_t *board, unsigned int num_registers)
free_private(board);
}
-void pc2a_detach(gpib_board_t *board)
+static void pc2a_detach(struct gpib_board *board)
{
pc2a_common_detach(board, pc2a_iosize);
}
-void pc2_2a_detach(gpib_board_t *board)
+static void pc2_2a_detach(struct gpib_board *board)
{
pc2a_common_detach(board, pc2_2a_iosize);
}
+static gpib_interface_t pc2_interface = {
+ .name = "pcII",
+ .attach = pc2_attach,
+ .detach = pc2_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
+};
+
+static gpib_interface_t pc2a_interface = {
+ .name = "pcIIa",
+ .attach = pc2a_attach,
+ .detach = pc2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
+};
+
+static gpib_interface_t pc2a_cb7210_interface = {
+ .name = "pcIIa_cb7210",
+ .attach = pc2a_cb7210_attach,
+ .detach = pc2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL, //XXX
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
+};
+
+static gpib_interface_t pc2_2a_interface = {
+ .name = "pcII_IIa",
+ .attach = pc2_2a_attach,
+ .detach = pc2_2a_detach,
+ .read = pc2_read,
+ .write = pc2_write,
+ .command = pc2_command,
+ .take_control = pc2_take_control,
+ .go_to_standby = pc2_go_to_standby,
+ .request_system_control = pc2_request_system_control,
+ .interface_clear = pc2_interface_clear,
+ .remote_enable = pc2_remote_enable,
+ .enable_eos = pc2_enable_eos,
+ .disable_eos = pc2_disable_eos,
+ .parallel_poll = pc2_parallel_poll,
+ .parallel_poll_configure = pc2_parallel_poll_configure,
+ .parallel_poll_response = pc2_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = pc2_update_status,
+ .primary_address = pc2_primary_address,
+ .secondary_address = pc2_secondary_address,
+ .serial_poll_response = pc2_serial_poll_response,
+ .serial_poll_status = pc2_serial_poll_status,
+ .t1_delay = pc2_t1_delay,
+ .return_to_local = pc2_return_to_local,
+};
+
static int __init pc2_init_module(void)
{
int ret;
ret = gpib_register_driver(&pc2_interface, THIS_MODULE);
if (ret) {
- pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
return ret;
}
ret = gpib_register_driver(&pc2a_interface, THIS_MODULE);
if (ret) {
- pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pc2a;
}
ret = gpib_register_driver(&pc2a_cb7210_interface, THIS_MODULE);
if (ret) {
- pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_cb7210;
}
ret = gpib_register_driver(&pc2_2a_interface, THIS_MODULE);
if (ret) {
- pr_err("pc2_gpib: gpib_register_driver failed: error = %d\n", ret);
+ pr_err("gpib_register_driver failed: error = %d\n", ret);
goto err_pc2_2a;
}
diff --git a/drivers/staging/gpib/tms9914/tms9914.c b/drivers/staging/gpib/tms9914/tms9914.c
index ec8e1d4d762f..2abda9d7dfcb 100644
--- a/drivers/staging/gpib/tms9914/tms9914.c
+++ b/drivers/staging/gpib/tms9914/tms9914.c
@@ -4,6 +4,9 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -24,9 +27,9 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB library for tms9914");
-static unsigned int update_status_nolock(gpib_board_t *board, struct tms9914_priv *priv);
+static unsigned int update_status_nolock(struct gpib_board *board, struct tms9914_priv *priv);
-int tms9914_take_control(gpib_board_t *board, struct tms9914_priv *priv, int synchronous)
+int tms9914_take_control(struct gpib_board *board, struct tms9914_priv *priv, int synchronous)
{
int i;
const int timeout = 100;
@@ -63,7 +66,7 @@ EXPORT_SYMBOL_GPL(tms9914_take_control);
* The rest of the tms9914 based drivers still use tms9914_take_control
* directly (which does issue tcs).
*/
-int tms9914_take_control_workaround(gpib_board_t *board, struct tms9914_priv *priv, int synchronous)
+int tms9914_take_control_workaround(struct gpib_board *board, struct tms9914_priv *priv, int synchronous)
{
if (synchronous)
return -ETIMEDOUT;
@@ -71,7 +74,7 @@ int tms9914_take_control_workaround(gpib_board_t *board, struct tms9914_priv *pr
}
EXPORT_SYMBOL_GPL(tms9914_take_control_workaround);
-int tms9914_go_to_standby(gpib_board_t *board, struct tms9914_priv *priv)
+int tms9914_go_to_standby(struct gpib_board *board, struct tms9914_priv *priv)
{
int i;
const int timeout = 1000;
@@ -83,10 +86,8 @@ int tms9914_go_to_standby(gpib_board_t *board, struct tms9914_priv *priv)
break;
udelay(1);
}
- if (i == timeout) {
- pr_err("error waiting for NATN\n");
+ if (i == timeout)
return -ETIMEDOUT;
- }
clear_bit(COMMAND_READY_BN, &priv->state);
@@ -94,7 +95,7 @@ int tms9914_go_to_standby(gpib_board_t *board, struct tms9914_priv *priv)
}
EXPORT_SYMBOL_GPL(tms9914_go_to_standby);
-void tms9914_interface_clear(gpib_board_t *board, struct tms9914_priv *priv, int assert)
+void tms9914_interface_clear(struct gpib_board *board, struct tms9914_priv *priv, int assert)
{
if (assert) {
write_byte(priv, AUX_SIC | AUX_CS, AUXCR);
@@ -106,7 +107,7 @@ void tms9914_interface_clear(gpib_board_t *board, struct tms9914_priv *priv, int
}
EXPORT_SYMBOL_GPL(tms9914_interface_clear);
-void tms9914_remote_enable(gpib_board_t *board, struct tms9914_priv *priv, int enable)
+void tms9914_remote_enable(struct gpib_board *board, struct tms9914_priv *priv, int enable)
{
if (enable)
write_byte(priv, AUX_SRE | AUX_CS, AUXCR);
@@ -115,7 +116,7 @@ void tms9914_remote_enable(gpib_board_t *board, struct tms9914_priv *priv, int e
}
EXPORT_SYMBOL_GPL(tms9914_remote_enable);
-void tms9914_request_system_control(gpib_board_t *board, struct tms9914_priv *priv,
+void tms9914_request_system_control(struct gpib_board *board, struct tms9914_priv *priv,
int request_control)
{
if (request_control) {
@@ -127,7 +128,7 @@ void tms9914_request_system_control(gpib_board_t *board, struct tms9914_priv *pr
}
EXPORT_SYMBOL_GPL(tms9914_request_system_control);
-unsigned int tms9914_t1_delay(gpib_board_t *board, struct tms9914_priv *priv,
+unsigned int tms9914_t1_delay(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int nano_sec)
{
static const int clock_period = 200; // assuming 5Mhz input clock
@@ -153,7 +154,7 @@ unsigned int tms9914_t1_delay(gpib_board_t *board, struct tms9914_priv *priv,
}
EXPORT_SYMBOL_GPL(tms9914_t1_delay);
-void tms9914_return_to_local(const gpib_board_t *board, struct tms9914_priv *priv)
+void tms9914_return_to_local(const struct gpib_board *board, struct tms9914_priv *priv)
{
write_byte(priv, AUX_RTL, AUXCR);
}
@@ -175,7 +176,7 @@ void tms9914_set_holdoff_mode(struct tms9914_priv *priv, enum tms9914_holdoff_mo
write_byte(priv, AUX_HLDA | AUX_CS, AUXCR);
break;
default:
- pr_err("%s: bug! bad holdoff mode %i\n", __func__, mode);
+ pr_err("bug! bad holdoff mode %i\n", mode);
break;
}
priv->holdoff_mode = mode;
@@ -191,7 +192,7 @@ void tms9914_release_holdoff(struct tms9914_priv *priv)
}
EXPORT_SYMBOL_GPL(tms9914_release_holdoff);
-int tms9914_enable_eos(gpib_board_t *board, struct tms9914_priv *priv, uint8_t eos_byte,
+int tms9914_enable_eos(struct gpib_board *board, struct tms9914_priv *priv, uint8_t eos_byte,
int compare_8_bits)
{
priv->eos = eos_byte;
@@ -202,13 +203,13 @@ int tms9914_enable_eos(gpib_board_t *board, struct tms9914_priv *priv, uint8_t e
}
EXPORT_SYMBOL(tms9914_enable_eos);
-void tms9914_disable_eos(gpib_board_t *board, struct tms9914_priv *priv)
+void tms9914_disable_eos(struct gpib_board *board, struct tms9914_priv *priv)
{
priv->eos_flags &= ~REOS;
}
EXPORT_SYMBOL(tms9914_disable_eos);
-int tms9914_parallel_poll(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *result)
+int tms9914_parallel_poll(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *result)
{
// execute parallel poll
write_byte(priv, AUX_CS | AUX_RPP, AUXCR);
@@ -233,7 +234,7 @@ static void set_ppoll_reg(struct tms9914_priv *priv, int enable,
}
}
-void tms9914_parallel_poll_configure(gpib_board_t *board,
+void tms9914_parallel_poll_configure(struct gpib_board *board,
struct tms9914_priv *priv, uint8_t config)
{
priv->ppoll_enable = (config & PPC_DISABLE) == 0;
@@ -243,14 +244,14 @@ void tms9914_parallel_poll_configure(gpib_board_t *board,
}
EXPORT_SYMBOL(tms9914_parallel_poll_configure);
-void tms9914_parallel_poll_response(gpib_board_t *board,
+void tms9914_parallel_poll_response(struct gpib_board *board,
struct tms9914_priv *priv, int ist)
{
set_ppoll_reg(priv, priv->ppoll_enable, priv->ppoll_line, priv->ppoll_sense, ist);
}
EXPORT_SYMBOL(tms9914_parallel_poll_response);
-void tms9914_serial_poll_response(gpib_board_t *board, struct tms9914_priv *priv, uint8_t status)
+void tms9914_serial_poll_response(struct gpib_board *board, struct tms9914_priv *priv, uint8_t status)
{
unsigned long flags;
@@ -265,7 +266,7 @@ void tms9914_serial_poll_response(gpib_board_t *board, struct tms9914_priv *priv
}
EXPORT_SYMBOL(tms9914_serial_poll_response);
-uint8_t tms9914_serial_poll_status(gpib_board_t *board, struct tms9914_priv *priv)
+uint8_t tms9914_serial_poll_status(struct gpib_board *board, struct tms9914_priv *priv)
{
u8 status;
unsigned long flags;
@@ -278,7 +279,7 @@ uint8_t tms9914_serial_poll_status(gpib_board_t *board, struct tms9914_priv *pri
}
EXPORT_SYMBOL(tms9914_serial_poll_status);
-int tms9914_primary_address(gpib_board_t *board, struct tms9914_priv *priv, unsigned int address)
+int tms9914_primary_address(struct gpib_board *board, struct tms9914_priv *priv, unsigned int address)
{
// put primary address in address0
write_byte(priv, address & ADDRESS_MASK, ADR);
@@ -286,7 +287,7 @@ int tms9914_primary_address(gpib_board_t *board, struct tms9914_priv *priv, unsi
}
EXPORT_SYMBOL(tms9914_primary_address);
-int tms9914_secondary_address(gpib_board_t *board, struct tms9914_priv *priv,
+int tms9914_secondary_address(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int address, int enable)
{
if (enable)
@@ -299,7 +300,7 @@ int tms9914_secondary_address(gpib_board_t *board, struct tms9914_priv *priv,
}
EXPORT_SYMBOL(tms9914_secondary_address);
-unsigned int tms9914_update_status(gpib_board_t *board, struct tms9914_priv *priv,
+unsigned int tms9914_update_status(struct gpib_board *board, struct tms9914_priv *priv,
unsigned int clear_mask)
{
unsigned long flags;
@@ -341,7 +342,7 @@ static void update_listener_state(struct tms9914_priv *priv, unsigned int addres
}
}
-static unsigned int update_status_nolock(gpib_board_t *board, struct tms9914_priv *priv)
+static unsigned int update_status_nolock(struct gpib_board *board, struct tms9914_priv *priv)
{
int address_status;
int bsr_bits;
@@ -387,29 +388,29 @@ static unsigned int update_status_nolock(gpib_board_t *board, struct tms9914_pri
return board->status;
}
-int tms9914_line_status(const gpib_board_t *board, struct tms9914_priv *priv)
+int tms9914_line_status(const struct gpib_board *board, struct tms9914_priv *priv)
{
int bsr_bits;
- int status = ValidALL;
+ int status = VALID_ALL;
bsr_bits = read_byte(priv, BSR);
if (bsr_bits & BSR_REN_BIT)
- status |= BusREN;
+ status |= BUS_REN;
if (bsr_bits & BSR_IFC_BIT)
- status |= BusIFC;
+ status |= BUS_IFC;
if (bsr_bits & BSR_SRQ_BIT)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if (bsr_bits & BSR_EOI_BIT)
- status |= BusEOI;
+ status |= BUS_EOI;
if (bsr_bits & BSR_NRFD_BIT)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if (bsr_bits & BSR_NDAC_BIT)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if (bsr_bits & BSR_DAV_BIT)
- status |= BusDAV;
+ status |= BUS_DAV;
if (bsr_bits & BSR_ATN_BIT)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
@@ -432,15 +433,14 @@ static int check_for_eos(struct tms9914_priv *priv, uint8_t byte)
return 0;
}
-static int wait_for_read_byte(gpib_board_t *board, struct tms9914_priv *priv)
+static int wait_for_read_byte(struct gpib_board *board, struct tms9914_priv *priv)
{
if (wait_event_interruptible(board->wait,
test_bit(READ_READY_BN, &priv->state) ||
test_bit(DEV_CLEAR_BN, &priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- pr_debug("gpib: pio read wait interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
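
The hunk above drops the pr_debug() that fired whenever wait_event_interruptible() returned non-zero; a non-zero return there simply means a signal is pending, and -ERESTARTSYS is the normal way to report that, so the message added no information. The resulting pattern, as a minimal sketch (illustrative, condensed from the surrounding code):

	if (wait_event_interruptible(board->wait,
				     test_bit(READ_READY_BN, &priv->state) ||
				     test_bit(DEV_CLEAR_BN, &priv->state) ||
				     test_bit(TIMO_NUM, &board->status)))
		return -ERESTARTSYS;	/* interrupted by a signal, nothing to log */

	if (test_bit(TIMO_NUM, &board->status))
		return -ETIMEDOUT;	/* board timeout expired */
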
@@ -449,7 +449,7 @@ static int wait_for_read_byte(gpib_board_t *board, struct tms9914_priv *priv)
return 0;
}
-static inline uint8_t tms9914_read_data_in(gpib_board_t *board, struct tms9914_priv *priv, int *end)
+static inline uint8_t tms9914_read_data_in(struct gpib_board *board, struct tms9914_priv *priv, int *end)
{
unsigned long flags;
u8 data;
@@ -472,7 +472,7 @@ static inline uint8_t tms9914_read_data_in(gpib_board_t *board, struct tms9914_p
case TMS9914_HOLDOFF_NONE:
break;
default:
- pr_err("%s: bug! bad holdoff mode %i\n", __func__, priv->holdoff_mode);
+ dev_err(board->gpib_dev, "bug! bad holdoff mode %i\n", priv->holdoff_mode);
break;
}
spin_unlock_irqrestore(&board->spinlock, flags);
@@ -480,7 +480,7 @@ static inline uint8_t tms9914_read_data_in(gpib_board_t *board, struct tms9914_p
return data;
}
-static int pio_read(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+static int pio_read(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -501,7 +501,7 @@ static int pio_read(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buf
return retval;
}
-int tms9914_read(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_read(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, int *end, size_t *bytes_read)
{
ssize_t retval = 0;
@@ -541,17 +541,16 @@ int tms9914_read(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer
}
EXPORT_SYMBOL(tms9914_read);
-static int pio_write_wait(gpib_board_t *board, struct tms9914_priv *priv)
+static int pio_write_wait(struct gpib_board *board, struct tms9914_priv *priv)
{
// wait until next byte is ready to be sent
if (wait_event_interruptible(board->wait,
test_bit(WRITE_READY_BN, &priv->state) ||
test_bit(BUS_ERROR_BN, &priv->state) ||
test_bit(DEV_CLEAR_BN, &priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted!\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
+
if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
if (test_bit(BUS_ERROR_BN, &priv->state))
@@ -562,7 +561,7 @@ static int pio_write_wait(gpib_board_t *board, struct tms9914_priv *priv)
return 0;
}
-static int pio_write(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+static int pio_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, size_t *bytes_written)
{
ssize_t retval = 0;
@@ -586,7 +585,7 @@ static int pio_write(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *bu
return length;
}
-int tms9914_write(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer, size_t length,
+int tms9914_write(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer, size_t length,
int send_eoi, size_t *bytes_written)
{
ssize_t retval = 0;
@@ -621,7 +620,7 @@ int tms9914_write(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffe
}
EXPORT_SYMBOL(tms9914_write);
-static void check_my_address_state(gpib_board_t *board, struct tms9914_priv *priv, int cmd_byte)
+static void check_my_address_state(struct gpib_board *board, struct tms9914_priv *priv, int cmd_byte)
{
if (cmd_byte == MLA(board->pad)) {
priv->primary_listen_addressed = 1;
@@ -656,7 +655,7 @@ static void check_my_address_state(gpib_board_t *board, struct tms9914_priv *pri
}
}
-int tms9914_command(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *buffer,
+int tms9914_command(struct gpib_board *board, struct tms9914_priv *priv, uint8_t *buffer,
size_t length, size_t *bytes_written)
{
int retval = 0;
@@ -667,10 +666,8 @@ int tms9914_command(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *bu
if (wait_event_interruptible(board->wait,
test_bit(COMMAND_READY_BN,
&priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- pr_debug("gpib command wait interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
break;
- }
if (test_bit(TIMO_NUM, &board->status))
break;
@@ -695,7 +692,7 @@ int tms9914_command(gpib_board_t *board, struct tms9914_priv *priv, uint8_t *bu
}
EXPORT_SYMBOL(tms9914_command);
-irqreturn_t tms9914_interrupt(gpib_board_t *board, struct tms9914_priv *priv)
+irqreturn_t tms9914_interrupt(struct gpib_board *board, struct tms9914_priv *priv)
{
int status0, status1;
@@ -706,7 +703,7 @@ irqreturn_t tms9914_interrupt(gpib_board_t *board, struct tms9914_priv *priv)
}
EXPORT_SYMBOL(tms9914_interrupt);
-irqreturn_t tms9914_interrupt_have_status(gpib_board_t *board, struct tms9914_priv *priv,
+irqreturn_t tms9914_interrupt_have_status(struct gpib_board *board, struct tms9914_priv *priv,
int status0, int status1)
{
// record reception of END
@@ -761,8 +758,6 @@ irqreturn_t tms9914_interrupt_have_status(gpib_board_t *board, struct tms9914_pr
write_byte(priv, AUX_INVAL, AUXCR);
}
} else {
- // printk("tms9914: unrecognized gpib command pass thru 0x%x\n",
- // command_byte);
// clear dac holdoff
write_byte(priv, AUX_INVAL, AUXCR);
}
@@ -799,7 +794,7 @@ irqreturn_t tms9914_interrupt_have_status(gpib_board_t *board, struct tms9914_pr
// check for being addressed with secondary addressing
if (status1 & HR_APT) {
if (board->sad < 0)
- pr_err("tms9914: bug, APT interrupt without secondary addressing?\n");
+ dev_err(board->gpib_dev, "bug, APT interrupt without secondary addressing?\n");
if ((read_byte(priv, CPTR) & gpib_command_mask) == MSA(board->sad))
write_byte(priv, AUX_VAL, AUXCR);
else
@@ -807,8 +802,8 @@ irqreturn_t tms9914_interrupt_have_status(gpib_board_t *board, struct tms9914_pr
}
if ((status0 & priv->imr0_bits) || (status1 & priv->imr1_bits)) {
-// dev_dbg(board->gpib_dev, "isr0 0x%x, imr0 0x%x, isr1 0x%x, imr1 0x%x\n",
-// status0, priv->imr0_bits, status1, priv->imr1_bits);
+ dev_dbg(board->gpib_dev, "isr0 0x%x, imr0 0x%x, isr1 0x%x, imr1 0x%x\n",
+ status0, priv->imr0_bits, status1, priv->imr1_bits);
update_status_nolock(board, priv);
wake_up_interruptible(&board->wait);
}
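
Above, a commented-out diagnostic becomes a real dev_dbg() call. Unlike dead comments, dev_dbg() costs nothing in a production build: it compiles to nothing unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG enables it at run time. A sketch of how it would typically be switched on (illustrative; the control path is the standard dynamic-debug interface, not something added by this patch):

	/* Enabled per call site with dynamic debug, e.g.:
	 *   echo 'file tms9914.c +p' > /sys/kernel/debug/dynamic_debug/control
	 */
	dev_dbg(board->gpib_dev, "isr0 0x%x, imr0 0x%x, isr1 0x%x, imr1 0x%x\n",
		status0, priv->imr0_bits, status1, priv->imr1_bits);
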
@@ -842,7 +837,7 @@ void tms9914_board_reset(struct tms9914_priv *priv)
}
EXPORT_SYMBOL_GPL(tms9914_board_reset);
-void tms9914_online(gpib_board_t *board, struct tms9914_priv *priv)
+void tms9914_online(struct gpib_board *board, struct tms9914_priv *priv)
{
/* set GPIB address */
tms9914_primary_address(board, priv, board->pad);
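
The hunks above apply one mechanical conversion: the gpib_board_t typedef is spelled out as struct gpib_board, and the CamelCase bus-line constants (ValidALL, BusREN, BusIFC, ...) become upper-case names (VALID_ALL, BUS_REN, BUS_IFC, ...), in line with kernel coding style, which discourages typedefs for plain structs. A minimal before/after sketch of the signature change (illustrative only; the typedef shown is an assumption about how gpib_board_t was declared):

	/* Before: the typedef hides that a struct is being passed around. */
	typedef struct gpib_board gpib_board_t;

	int tms9914_enable_eos(gpib_board_t *board, struct tms9914_priv *priv,
			       uint8_t eos_byte, int compare_8_bits);

	/* After: the struct tag is written out at every use, so the typedef
	 * can eventually be dropped from the shared gpib header.
	 */
	int tms9914_enable_eos(struct gpib_board *board, struct tms9914_priv *priv,
			       uint8_t eos_byte, int compare_8_bits);
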
diff --git a/drivers/staging/gpib/tnt4882/Makefile b/drivers/staging/gpib/tnt4882/Makefile
index a3c3fb96d5ed..fa1687ad0d1b 100644
--- a/drivers/staging/gpib/tnt4882/Makefile
+++ b/drivers/staging/gpib/tnt4882/Makefile
@@ -1,4 +1,3 @@
-ccflags-$(CONFIG_GPIB_PCMCIA) := -DGPIB_PCMCIA
obj-$(CONFIG_GPIB_NI_PCI_ISA) += tnt4882.o
tnt4882-objs := tnt4882_gpib.o mite.o
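
The dropped ccflags line removes the hand-rolled GPIB_PCMCIA define; as the tnt4882_gpib.c hunks further down show, the code now tests the Kconfig symbol directly. A minimal sketch of the convention (illustrative, simplified from the surrounding code):

	/* Before: the Makefile injected -DGPIB_PCMCIA and the C code
	 * tested that private symbol.
	 */
	#ifdef GPIB_PCMCIA
	result = gpib_register_driver(&ni_pcmcia_interface, THIS_MODULE);
	#endif

	/* After: the CONFIG_ symbol that Kconfig already generates is used,
	 * so the extra ccflags indirection is redundant.
	 */
	#ifdef CONFIG_GPIB_PCMCIA
	result = gpib_register_driver(&ni_pcmcia_interface, THIS_MODULE);
	#endif
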
diff --git a/drivers/staging/gpib/tnt4882/mite.c b/drivers/staging/gpib/tnt4882/mite.c
index ea64dde46bcb..847b96f411bd 100644
--- a/drivers/staging/gpib/tnt4882/mite.c
+++ b/drivers/staging/gpib/tnt4882/mite.c
@@ -88,7 +88,6 @@ int mite_setup(struct mite_struct *mite)
pr_err("mite: failed to remap mite io memory address.\n");
return -ENOMEM;
}
- pr_info("mite: 0x%08lx mapped to %p\n", mite->mite_phys_addr, mite->mite_io_addr);
addr = pci_resource_start(mite->pcidev, 1);
mite->daq_phys_addr = addr;
mite->daq_io_addr = ioremap(mite->daq_phys_addr, pci_resource_len(mite->pcidev, 1));
@@ -96,7 +95,6 @@ int mite_setup(struct mite_struct *mite)
pr_err("mite: failed to remap daq io memory address.\n");
return -ENOMEM;
}
- pr_info("mite: daq: 0x%08lx mapped to %p\n", mite->daq_phys_addr, mite->daq_io_addr);
writel(mite->daq_phys_addr | WENAB, mite->mite_io_addr + MITE_IODWBSR);
mite->used = 1;
return 0;
@@ -133,18 +131,3 @@ void mite_unsetup(struct mite_struct *mite)
}
mite->used = 0;
}
-
-void mite_list_devices(void)
-{
- struct mite_struct *mite, *next;
-
- pr_info("Available NI PCI device IDs:");
- if (mite_devices)
- for (mite = mite_devices; mite; mite = next) {
- next = mite->next;
- pr_info(" 0x%04x", mite_device_id(mite));
- if (mite->used)
- pr_info("(used)");
- }
- pr_info("\n");
-}
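
The tnt4882_gpib.c diff that follows leans on the kernel's pr_fmt/dev_fmt convention to keep a consistent module prefix instead of hard-coding "tnt4882:" or "tnt4882_gpib:" in every message. A minimal sketch of how that works (illustrative, not part of the patch):

	/* Defined before the first include that pulls in the printk helpers:
	 * every pr_*() call in this file, and via dev_fmt every dev_*() call,
	 * gets the module name prepended automatically.
	 */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
	#define dev_fmt pr_fmt

	/* So this ... */
	pr_err("pci_register_driver failed: error = %d\n", result);
	/* ... logs "tnt4882: pci_register_driver failed: error = ..."
	 * without repeating the prefix by hand at each call site.
	 */
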
diff --git a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
index b39ab2abe495..c35b084b6fd0 100644
--- a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
+++ b/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
@@ -5,6 +5,10 @@
* copyright : (C) 2001, 2002 by Frank Mori Hess
***************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
+#define DRV_NAME KBUILD_MODNAME
+
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/module.h>
@@ -47,49 +51,7 @@ struct tnt4882_priv {
unsigned short auxg_bits; // bits written to auxiliary register G
};
-// interface functions
-static int tnt4882_read(gpib_board_t *board, uint8_t *buffer, size_t length,
- int *end, size_t *bytes_read);
-static int tnt4882_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length,
- int *end, size_t *bytes_read);
-static int tnt4882_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-static int tnt4882_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length,
- int send_eoi, size_t *bytes_written);
-static int tnt4882_command(gpib_board_t *board, uint8_t *buffer, size_t length,
- size_t *bytes_written);
-static int tnt4882_command_unaccel(gpib_board_t *board, uint8_t *buffer,
- size_t length, size_t *bytes_written);
-static int tnt4882_take_control(gpib_board_t *board, int synchronous);
-static int tnt4882_go_to_standby(gpib_board_t *board);
-static void tnt4882_request_system_control(gpib_board_t *board, int request_control);
-static void tnt4882_interface_clear(gpib_board_t *board, int assert);
-static void tnt4882_remote_enable(gpib_board_t *board, int enable);
-static int tnt4882_enable_eos(gpib_board_t *board, uint8_t eos_byte, int
- compare_8_bits);
-static void tnt4882_disable_eos(gpib_board_t *board);
-static unsigned int tnt4882_update_status(gpib_board_t *board, unsigned int clear_mask);
-static int tnt4882_primary_address(gpib_board_t *board, unsigned int address);
-static int tnt4882_secondary_address(gpib_board_t *board, unsigned int address,
- int enable);
-static int tnt4882_parallel_poll(gpib_board_t *board, uint8_t *result);
-static void tnt4882_parallel_poll_configure(gpib_board_t *board, uint8_t config);
-static void tnt4882_parallel_poll_response(gpib_board_t *board, int ist);
-static void tnt4882_serial_poll_response(gpib_board_t *board, uint8_t status);
-static uint8_t tnt4882_serial_poll_status(gpib_board_t *board);
-static int tnt4882_line_status(const gpib_board_t *board);
-static unsigned int tnt4882_t1_delay(gpib_board_t *board, unsigned int nano_sec);
-static void tnt4882_return_to_local(gpib_board_t *board);
-
-// interrupt service routines
-static irqreturn_t tnt4882_internal_interrupt(gpib_board_t *board);
-static irqreturn_t tnt4882_interrupt(int irq, void *arg);
-
-// utility functions
-static int tnt4882_allocate_private(gpib_board_t *board);
-static void tnt4882_free_private(gpib_board_t *board);
-static void tnt4882_init(struct tnt4882_priv *tnt_priv, const gpib_board_t *board);
-static void tnt4882_board_reset(struct tnt4882_priv *tnt_priv, gpib_board_t *board);
+static irqreturn_t tnt4882_internal_interrupt(struct gpib_board *board);
// register offset for nec7210 compatible registers
static const int atgpib_reg_offset = 2;
@@ -139,7 +101,6 @@ static inline unsigned short tnt_readb(struct tnt4882_priv *priv, unsigned long
retval = 0;
break;
default:
- pr_err("tnt4882: bug! unsupported ni_chipset\n");
retval = 0;
break;
}
@@ -174,7 +135,6 @@ static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, u
case NEC7210:
break;
default:
- pr_err("tnt4882: bug! unsupported ni_chipset\n");
break;
}
break;
@@ -188,9 +148,9 @@ static inline void tnt_writeb(struct tnt4882_priv *priv, unsigned short value, u
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GPIB driver for National Instruments boards using tnt4882 or compatible chips");
-int tnt4882_line_status(const gpib_board_t *board)
+static int tnt4882_line_status(const struct gpib_board *board)
{
- int status = ValidALL;
+ int status = VALID_ALL;
int bcsr_bits;
struct tnt4882_priv *tnt_priv;
@@ -199,26 +159,26 @@ int tnt4882_line_status(const gpib_board_t *board)
bcsr_bits = tnt_readb(tnt_priv, BSR);
if (bcsr_bits & BCSR_REN_BIT)
- status |= BusREN;
+ status |= BUS_REN;
if (bcsr_bits & BCSR_IFC_BIT)
- status |= BusIFC;
+ status |= BUS_IFC;
if (bcsr_bits & BCSR_SRQ_BIT)
- status |= BusSRQ;
+ status |= BUS_SRQ;
if (bcsr_bits & BCSR_EOI_BIT)
- status |= BusEOI;
+ status |= BUS_EOI;
if (bcsr_bits & BCSR_NRFD_BIT)
- status |= BusNRFD;
+ status |= BUS_NRFD;
if (bcsr_bits & BCSR_NDAC_BIT)
- status |= BusNDAC;
+ status |= BUS_NDAC;
if (bcsr_bits & BCSR_DAV_BIT)
- status |= BusDAV;
+ status |= BUS_DAV;
if (bcsr_bits & BCSR_ATN_BIT)
- status |= BusATN;
+ status |= BUS_ATN;
return status;
}
-unsigned int tnt4882_t1_delay(gpib_board_t *board, unsigned int nano_sec)
+static int tnt4882_t1_delay(struct gpib_board *board, unsigned int nano_sec)
{
struct tnt4882_priv *tnt_priv = board->private_data;
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
@@ -291,7 +251,7 @@ static int drain_fifo_words(struct tnt4882_priv *tnt_priv, uint8_t *buffer, int
return count;
}
-static void tnt4882_release_holdoff(gpib_board_t *board, struct tnt4882_priv *tnt_priv)
+static void tnt4882_release_holdoff(struct gpib_board *board, struct tnt4882_priv *tnt_priv)
{
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
unsigned short sasr_bits;
@@ -314,8 +274,8 @@ static void tnt4882_release_holdoff(gpib_board_t *board, struct tnt4882_priv *tn
}
}
-int tnt4882_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
- size_t *bytes_read)
+static int tnt4882_accel_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ size_t *bytes_read)
{
size_t count = 0;
ssize_t retval = 0;
@@ -368,22 +328,18 @@ int tnt4882_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(ADR_CHANGE_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_err("tnt4882: read interrupted\n");
retval = -ERESTARTSYS;
break;
}
if (test_bit(TIMO_NUM, &board->status)) {
- //pr_info("tnt4882: minor %i read timed out\n", board->minor);
retval = -ETIMEDOUT;
break;
}
if (test_bit(DEV_CLEAR_BN, &nec_priv->state)) {
- pr_err("tnt4882: device clear interrupted read\n");
retval = -EINTR;
break;
}
if (test_bit(ADR_CHANGE_BN, &nec_priv->state)) {
- pr_err("tnt4882: address change interrupted read\n");
retval = -EINTR;
break;
}
@@ -410,20 +366,14 @@ int tnt4882_accel_read(gpib_board_t *board, uint8_t *buffer, size_t length, int
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
test_bit(ADR_CHANGE_BN, &nec_priv->state) ||
test_bit(TIMO_NUM, &board->status))) {
- pr_err("tnt4882: read interrupted\n");
retval = -ERESTARTSYS;
}
if (test_bit(TIMO_NUM, &board->status))
- //pr_info("tnt4882: read timed out\n");
retval = -ETIMEDOUT;
- if (test_bit(DEV_CLEAR_BN, &nec_priv->state)) {
- pr_err("tnt4882: device clear interrupted read\n");
+ if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
retval = -EINTR;
- }
- if (test_bit(ADR_CHANGE_BN, &nec_priv->state)) {
- pr_err("tnt4882: address change interrupted read\n");
+ if (test_bit(ADR_CHANGE_BN, &nec_priv->state))
retval = -EINTR;
- }
count += drain_fifo_words(tnt_priv, &buffer[count], length - count);
if (fifo_byte_available(tnt_priv) && count < length)
buffer[count++] = tnt_readb(tnt_priv, FIFOB);
@@ -476,7 +426,7 @@ static unsigned int tnt_transfer_count(struct tnt4882_priv *tnt_priv)
return -count;
};
-static int write_wait(gpib_board_t *board, struct tnt4882_priv *tnt_priv,
+static int write_wait(struct gpib_board *board, struct tnt4882_priv *tnt_priv,
int wait_for_done, int send_commands)
{
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
@@ -486,26 +436,19 @@ static int write_wait(gpib_board_t *board, struct tnt4882_priv *tnt_priv,
fifo_xfer_done(tnt_priv) ||
test_bit(BUS_ERROR_BN, &nec_priv->state) ||
test_bit(DEV_CLEAR_BN, &nec_priv->state) ||
- test_bit(TIMO_NUM, &board->status))) {
- dev_dbg(board->gpib_dev, "gpib write interrupted\n");
+ test_bit(TIMO_NUM, &board->status)))
return -ERESTARTSYS;
- }
- if (test_bit(TIMO_NUM, &board->status)) {
- pr_info("tnt4882: write timed out\n");
+
+ if (test_bit(TIMO_NUM, &board->status))
return -ETIMEDOUT;
- }
- if (test_and_clear_bit(BUS_ERROR_BN, &nec_priv->state)) {
- pr_err("tnt4882: write bus error\n");
+ if (test_and_clear_bit(BUS_ERROR_BN, &nec_priv->state))
return (send_commands) ? -ENOTCONN : -ECOMM;
- }
- if (test_bit(DEV_CLEAR_BN, &nec_priv->state)) {
- pr_err("tnt4882: device clear interrupted write\n");
+ if (test_bit(DEV_CLEAR_BN, &nec_priv->state))
return -EINTR;
- }
return 0;
}
-static int generic_write(gpib_board_t *board, uint8_t *buffer, size_t length,
+static int generic_write(struct gpib_board *board, uint8_t *buffer, size_t length,
int send_eoi, int send_commands, size_t *bytes_written)
{
size_t count = 0;
@@ -596,18 +539,19 @@ static int generic_write(gpib_board_t *board, uint8_t *buffer, size_t length,
return retval;
}
-int tnt4882_accel_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int tnt4882_accel_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
return generic_write(board, buffer, length, send_eoi, 0, bytes_written);
}
-int tnt4882_command(gpib_board_t *board, uint8_t *buffer, size_t length, size_t *bytes_written)
+static int tnt4882_command(struct gpib_board *board, uint8_t *buffer, size_t length,
+ size_t *bytes_written)
{
return generic_write(board, buffer, length, 0, 1, bytes_written);
}
-irqreturn_t tnt4882_internal_interrupt(gpib_board_t *board)
+static irqreturn_t tnt4882_internal_interrupt(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
int isr0_bits, isr3_bits, imr3_bits;
@@ -633,7 +577,7 @@ irqreturn_t tnt4882_internal_interrupt(gpib_board_t *board)
if (isr3_bits & HR_DONE)
priv->imr3_bits &= ~HR_DONE;
if (isr3_bits & (HR_INTR | HR_TLCI)) {
- dev_dbg(board->gpib_dev, "tnt4882: minor %i isr0 0x%x imr0 0x%x isr3 0x%x imr3 0x%x\n",
+ dev_dbg(board->gpib_dev, "minor %i isr0 0x%x imr0 0x%x isr3 0x%x imr3 0x%x\n",
board->minor, isr0_bits, priv->imr0_bits, isr3_bits, imr3_bits);
tnt_writeb(priv, priv->imr3_bits, IMR3);
wake_up_interruptible(&board->wait);
@@ -642,28 +586,14 @@ irqreturn_t tnt4882_internal_interrupt(gpib_board_t *board)
return IRQ_HANDLED;
}
-irqreturn_t tnt4882_interrupt(int irq, void *arg)
+static irqreturn_t tnt4882_interrupt(int irq, void *arg)
{
return tnt4882_internal_interrupt(arg);
}
-static int ni_tnt_isa_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ni_nat4882_isa_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ni_nec_isa_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config);
-
-static void ni_isa_detach(gpib_board_t *board);
-static void ni_pci_detach(gpib_board_t *board);
-
-#ifdef GPIB_PCMCIA
-static int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static void ni_pcmcia_detach(gpib_board_t *board);
-static int init_ni_gpib_cs(void);
-static void __exit exit_ni_gpib_cs(void);
-#endif
-
// wrappers for interface functions
-int tnt4882_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end, size_t *bytes_read)
+static int tnt4882_read(struct gpib_board *board, uint8_t *buffer, size_t length, int *end,
+ size_t *bytes_read)
{
struct tnt4882_priv *priv = board->private_data;
struct nec7210_priv *nec_priv = &priv->nec7210_priv;
@@ -682,37 +612,37 @@ int tnt4882_read(gpib_board_t *board, uint8_t *buffer, size_t length, int *end,
return retval;
}
-int tnt4882_write(gpib_board_t *board, uint8_t *buffer, size_t length, int send_eoi,
- size_t *bytes_written)
+static int tnt4882_write(struct gpib_board *board, uint8_t *buffer, size_t length, int send_eoi,
+ size_t *bytes_written)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_write(board, &priv->nec7210_priv, buffer, length, send_eoi, bytes_written);
}
-int tnt4882_command_unaccel(gpib_board_t *board, uint8_t *buffer,
- size_t length, size_t *bytes_written)
+static int tnt4882_command_unaccel(struct gpib_board *board, uint8_t *buffer,
+ size_t length, size_t *bytes_written)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_command(board, &priv->nec7210_priv, buffer, length, bytes_written);
}
-int tnt4882_take_control(gpib_board_t *board, int synchronous)
+static int tnt4882_take_control(struct gpib_board *board, int synchronous)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_take_control(board, &priv->nec7210_priv, synchronous);
}
-int tnt4882_go_to_standby(gpib_board_t *board)
+static int tnt4882_go_to_standby(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_go_to_standby(board, &priv->nec7210_priv);
}
-void tnt4882_request_system_control(gpib_board_t *board, int request_control)
+static void tnt4882_request_system_control(struct gpib_board *board, int request_control)
{
struct tnt4882_priv *priv = board->private_data;
@@ -727,44 +657,43 @@ void tnt4882_request_system_control(gpib_board_t *board, int request_control)
}
}
-void tnt4882_interface_clear(gpib_board_t *board, int assert)
+static void tnt4882_interface_clear(struct gpib_board *board, int assert)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_interface_clear(board, &priv->nec7210_priv, assert);
}
-void tnt4882_remote_enable(gpib_board_t *board, int enable)
+static void tnt4882_remote_enable(struct gpib_board *board, int enable)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_remote_enable(board, &priv->nec7210_priv, enable);
}
-int tnt4882_enable_eos(gpib_board_t *board, uint8_t eos_byte, int compare_8_bits)
+static int tnt4882_enable_eos(struct gpib_board *board, uint8_t eos_byte, int compare_8_bits)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_enable_eos(board, &priv->nec7210_priv, eos_byte, compare_8_bits);
}
-void tnt4882_disable_eos(gpib_board_t *board)
+static void tnt4882_disable_eos(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_disable_eos(board, &priv->nec7210_priv);
}
-unsigned int tnt4882_update_status(gpib_board_t *board, unsigned int clear_mask)
+static unsigned int tnt4882_update_status(struct gpib_board *board, unsigned int clear_mask)
{
unsigned long flags;
u8 line_status;
- unsigned int retval;
struct tnt4882_priv *priv = board->private_data;
spin_lock_irqsave(&board->spinlock, flags);
board->status &= ~clear_mask;
- retval = nec7210_update_status_nolock(board, &priv->nec7210_priv);
+ nec7210_update_status_nolock(board, &priv->nec7210_priv);
/* set / clear SRQ state since it is not cleared by interrupt */
line_status = tnt_readb(priv, BSR);
if (line_status & BCSR_SRQ_BIT)
@@ -775,22 +704,21 @@ unsigned int tnt4882_update_status(gpib_board_t *board, unsigned int clear_mask)
return board->status;
}
-int tnt4882_primary_address(gpib_board_t *board, unsigned int address)
+static int tnt4882_primary_address(struct gpib_board *board, unsigned int address)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_primary_address(board, &priv->nec7210_priv, address);
}
-int tnt4882_secondary_address(gpib_board_t *board, unsigned int address, int enable)
+static int tnt4882_secondary_address(struct gpib_board *board, unsigned int address, int enable)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_secondary_address(board, &priv->nec7210_priv, address, enable);
}
-int tnt4882_parallel_poll(gpib_board_t *board, uint8_t *result)
-
+static int tnt4882_parallel_poll(struct gpib_board *board, uint8_t *result)
{
struct tnt4882_priv *tnt_priv = board->private_data;
@@ -807,7 +735,7 @@ int tnt4882_parallel_poll(gpib_board_t *board, uint8_t *result)
}
}
-void tnt4882_parallel_poll_configure(gpib_board_t *board, uint8_t config)
+static void tnt4882_parallel_poll_configure(struct gpib_board *board, uint8_t config)
{
struct tnt4882_priv *priv = board->private_data;
@@ -825,7 +753,7 @@ void tnt4882_parallel_poll_configure(gpib_board_t *board, uint8_t config)
}
}
-void tnt4882_parallel_poll_response(gpib_board_t *board, int ist)
+static void tnt4882_parallel_poll_response(struct gpib_board *board, int ist)
{
struct tnt4882_priv *priv = board->private_data;
@@ -835,14 +763,14 @@ void tnt4882_parallel_poll_response(gpib_board_t *board, int ist)
/* this is just used by the old nec7210 isa interfaces, the newer
* boards use tnt4882_serial_poll_response2
*/
-void tnt4882_serial_poll_response(gpib_board_t *board, uint8_t status)
+static void tnt4882_serial_poll_response(struct gpib_board *board, uint8_t status)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_serial_poll_response(board, &priv->nec7210_priv, status);
}
-static void tnt4882_serial_poll_response2(gpib_board_t *board, uint8_t status,
+static void tnt4882_serial_poll_response2(struct gpib_board *board, uint8_t status,
int new_reason_for_service)
{
struct tnt4882_priv *priv = board->private_data;
@@ -876,303 +804,21 @@ static void tnt4882_serial_poll_response2(gpib_board_t *board, uint8_t status,
spin_unlock_irqrestore(&board->spinlock, flags);
}
-uint8_t tnt4882_serial_poll_status(gpib_board_t *board)
+static uint8_t tnt4882_serial_poll_status(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
return nec7210_serial_poll_status(board, &priv->nec7210_priv);
}
-void tnt4882_return_to_local(gpib_board_t *board)
+static void tnt4882_return_to_local(struct gpib_board *board)
{
struct tnt4882_priv *priv = board->private_data;
nec7210_return_to_local(board, &priv->nec7210_priv);
}
-static gpib_interface_t ni_pci_interface = {
- .name = "ni_pci",
- .attach = ni_pci_attach,
- .detach = ni_pci_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_pci_accel_interface = {
- .name = "ni_pci_accel",
- .attach = ni_pci_attach,
- .detach = ni_pci_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_isa_interface = {
- .name = "ni_isa",
- .attach = ni_tnt_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_nat4882_isa_interface = {
- .name = "ni_nat4882_isa",
- .attach = ni_nat4882_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_read,
- .write = tnt4882_write,
- .command = tnt4882_command_unaccel,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_nec_isa_interface = {
- .name = "ni_nec_isa",
- .attach = ni_nec_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_read,
- .write = tnt4882_write,
- .command = tnt4882_command_unaccel,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response = tnt4882_serial_poll_response,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_isa_accel_interface = {
- .name = "ni_isa_accel",
- .attach = ni_tnt_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_nat4882_isa_accel_interface = {
- .name = "ni_nat4882_isa_accel",
- .attach = ni_nat4882_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command_unaccel,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response2 = tnt4882_serial_poll_response2,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_nec_isa_accel_interface = {
- .name = "ni_nec_isa_accel",
- .attach = ni_nec_isa_attach,
- .detach = ni_isa_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command_unaccel,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response = tnt4882_serial_poll_response,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-#ifdef GPIB_PCMCIA
-static gpib_interface_t ni_pcmcia_interface = {
- .name = "ni_pcmcia",
- .attach = ni_pcmcia_attach,
- .detach = ni_pcmcia_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response = tnt4882_serial_poll_response,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-
-static gpib_interface_t ni_pcmcia_accel_interface = {
- .name = "ni_pcmcia_accel",
- .attach = ni_pcmcia_attach,
- .detach = ni_pcmcia_detach,
- .read = tnt4882_accel_read,
- .write = tnt4882_accel_write,
- .command = tnt4882_command,
- .take_control = tnt4882_take_control,
- .go_to_standby = tnt4882_go_to_standby,
- .request_system_control = tnt4882_request_system_control,
- .interface_clear = tnt4882_interface_clear,
- .remote_enable = tnt4882_remote_enable,
- .enable_eos = tnt4882_enable_eos,
- .disable_eos = tnt4882_disable_eos,
- .parallel_poll = tnt4882_parallel_poll,
- .parallel_poll_configure = tnt4882_parallel_poll_configure,
- .parallel_poll_response = tnt4882_parallel_poll_response,
- .local_parallel_poll_mode = NULL, // XXX
- .line_status = tnt4882_line_status,
- .update_status = tnt4882_update_status,
- .primary_address = tnt4882_primary_address,
- .secondary_address = tnt4882_secondary_address,
- .serial_poll_response = tnt4882_serial_poll_response,
- .serial_poll_status = tnt4882_serial_poll_status,
- .t1_delay = tnt4882_t1_delay,
- .return_to_local = tnt4882_return_to_local,
-};
-#endif
-
-void tnt4882_board_reset(struct tnt4882_priv *tnt_priv, gpib_board_t *board)
+static void tnt4882_board_reset(struct tnt4882_priv *tnt_priv, struct gpib_board *board)
{
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
@@ -1185,7 +831,7 @@ void tnt4882_board_reset(struct tnt4882_priv *tnt_priv, gpib_board_t *board)
nec7210_board_reset(nec_priv, board);
}
-int tnt4882_allocate_private(gpib_board_t *board)
+static int tnt4882_allocate_private(struct gpib_board *board)
{
struct tnt4882_priv *tnt_priv;
@@ -1198,13 +844,13 @@ int tnt4882_allocate_private(gpib_board_t *board)
return 0;
}
-void tnt4882_free_private(gpib_board_t *board)
+static void tnt4882_free_private(struct gpib_board *board)
{
kfree(board->private_data);
board->private_data = NULL;
}
-void tnt4882_init(struct tnt4882_priv *tnt_priv, const gpib_board_t *board)
+static void tnt4882_init(struct tnt4882_priv *tnt_priv, const struct gpib_board *board)
{
struct nec7210_priv *nec_priv = &tnt_priv->nec7210_priv;
@@ -1252,7 +898,7 @@ void tnt4882_init(struct tnt4882_priv *tnt_priv, const gpib_board_t *board)
tnt_writeb(tnt_priv, tnt_priv->imr0_bits, IMR0);
}
-int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_pci_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct tnt4882_priv *tnt_priv;
struct nec7210_priv *nec_priv;
@@ -1271,10 +917,8 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv->write_byte = nec7210_locking_iomem_write_byte;
nec_priv->offset = atgpib_reg_offset;
- if (!mite_devices) {
- pr_err("no National Instruments PCI boards found\n");
- return -1;
- }
+ if (!mite_devices)
+ return -ENODEV;
for (mite = mite_devices; mite; mite = mite->next) {
short found_board;
@@ -1305,37 +949,32 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (found_board)
break;
}
- if (!mite) {
- pr_err("no NI PCI-GPIB boards found\n");
- return -1;
- }
+ if (!mite)
+ return -ENODEV;
+
tnt_priv->mite = mite;
retval = mite_setup(tnt_priv->mite);
- if (retval < 0) {
- pr_err("tnt4882: error setting up mite.\n");
+ if (retval < 0)
return retval;
- }
nec_priv->mmiobase = tnt_priv->mite->daq_io_addr;
// get irq
- if (request_irq(mite_irq(tnt_priv->mite), tnt4882_interrupt, isr_flags,
- "ni-pci-gpib", board)) {
- pr_err("gpib: can't request IRQ %d\n", mite_irq(tnt_priv->mite));
- return -1;
+ retval = request_irq(mite_irq(tnt_priv->mite), tnt4882_interrupt, isr_flags, "ni-pci-gpib",
+ board);
+ if (retval) {
+ dev_err(board->gpib_dev, "failed to obtain pci irq %d\n", mite_irq(tnt_priv->mite));
+ return retval;
}
tnt_priv->irq = mite_irq(tnt_priv->mite);
- pr_info("tnt4882: irq %i\n", tnt_priv->irq);
// TNT5004 detection
switch (tnt_readb(tnt_priv, CSR) & 0xf0) {
case 0x30:
nec_priv->type = TNT4882;
- pr_info("tnt4882: TNT4882 chipset detected\n");
break;
case 0x40:
nec_priv->type = TNT5004;
- pr_info("tnt4882: TNT5004 chipset detected\n");
break;
}
tnt4882_init(tnt_priv, board);
@@ -1343,7 +982,7 @@ int ni_pci_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void ni_pci_detach(gpib_board_t *board)
+static void ni_pci_detach(struct gpib_board *board)
{
struct tnt4882_priv *tnt_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1365,28 +1004,22 @@ static int ni_isapnp_find(struct pnp_dev **dev)
{
*dev = pnp_find_dev(NULL, ISAPNP_VENDOR_ID_NI,
ISAPNP_FUNCTION(ISAPNP_ID_NI_ATGPIB_TNT), NULL);
- if (!*dev || !(*dev)->card) {
- pr_err("tnt4882: failed to find isapnp board\n");
+ if (!*dev || !(*dev)->card)
return -ENODEV;
- }
- if (pnp_device_attach(*dev) < 0) {
- pr_err("tnt4882: atgpib/tnt board already active, skipping\n");
+ if (pnp_device_attach(*dev) < 0)
return -EBUSY;
- }
if (pnp_activate_dev(*dev) < 0) {
pnp_device_detach(*dev);
- pr_err("tnt4882: failed to activate() atgpib/tnt, aborting\n");
return -EAGAIN;
}
if (!pnp_port_valid(*dev, 0) || !pnp_irq_valid(*dev, 0)) {
pnp_device_detach(*dev);
- pr_err("tnt4882: invalid port or irq for atgpib/tnt, aborting\n");
- return -ENOMEM;
+ return -EINVAL;
}
return 0;
}
-static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *config,
+static int ni_isa_attach_common(struct gpib_board *board, const gpib_board_config_t *config,
enum nec7210_chipset chipset)
{
struct tnt4882_priv *tnt_priv;
@@ -1394,6 +1027,7 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
int isr_flags = 0;
u32 iobase;
int irq;
+ int retval;
board->status = 0;
@@ -1409,7 +1043,6 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
// look for plug-n-play board
if (config->ibbase == 0) {
struct pnp_dev *dev;
- int retval;
retval = ni_isapnp_find(&dev);
if (retval < 0)
@@ -1422,18 +1055,18 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
irq = config->ibirq;
}
// allocate ioports
- if (!request_region(iobase, atgpib_iosize, "atgpib")) {
- pr_err("tnt4882: failed to allocate ioports\n");
- return -1;
- }
+ if (!request_region(iobase, atgpib_iosize, "atgpib"))
+ return -EBUSY;
+
nec_priv->mmiobase = ioport_map(iobase, atgpib_iosize);
if (!nec_priv->mmiobase)
- return -1;
+ return -EBUSY;
// get irq
- if (request_irq(irq, tnt4882_interrupt, isr_flags, "atgpib", board)) {
- pr_err("gpib: can't request IRQ %d\n", irq);
- return -1;
+ retval = request_irq(irq, tnt4882_interrupt, isr_flags, "atgpib", board);
+ if (retval) {
+ dev_err(board->gpib_dev, "failed to request ISA irq %d\n", irq);
+ return retval;
}
tnt_priv->irq = irq;
@@ -1442,22 +1075,22 @@ static int ni_isa_attach_common(gpib_board_t *board, const gpib_board_config_t *
return 0;
}
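
The attach-path hunks above replace bare -1 returns with specific errno values and propagate the return code from request_irq(), so callers of the attach hook can report a meaningful error. A minimal sketch of the convention (illustrative, condensed from the surrounding code):

	/* Before: every failure collapsed to -1 (numerically -EPERM),
	 * which hides the real cause.
	 */
	if (!request_region(iobase, atgpib_iosize, "atgpib"))
		return -1;

	/* After: each failure says why it failed. */
	if (!request_region(iobase, atgpib_iosize, "atgpib"))
		return -EBUSY;			/* I/O range already claimed */

	retval = request_irq(irq, tnt4882_interrupt, isr_flags, "atgpib", board);
	if (retval) {
		dev_err(board->gpib_dev, "failed to request ISA irq %d\n", irq);
		return retval;			/* propagate the real errno */
	}
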
-int ni_tnt_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_tnt_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return ni_isa_attach_common(board, config, TNT4882);
}
-int ni_nat4882_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_nat4882_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return ni_isa_attach_common(board, config, NAT4882);
}
-int ni_nec_isa_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_nec_isa_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
return ni_isa_attach_common(board, config, NEC7210);
}
-void ni_isa_detach(gpib_board_t *board)
+static void ni_isa_detach(struct gpib_board *board)
{
struct tnt4882_priv *tnt_priv = board->private_data;
struct nec7210_priv *nec_priv;
@@ -1483,6 +1116,230 @@ static int tnt4882_pci_probe(struct pci_dev *dev, const struct pci_device_id *id
return 0;
}
+static gpib_interface_t ni_pci_interface = {
+ .name = "ni_pci",
+ .attach = ni_pci_attach,
+ .detach = ni_pci_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_pci_accel_interface = {
+ .name = "ni_pci_accel",
+ .attach = ni_pci_attach,
+ .detach = ni_pci_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_isa_interface = {
+ .name = "ni_isa",
+ .attach = ni_tnt_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_nat4882_isa_interface = {
+ .name = "ni_nat4882_isa",
+ .attach = ni_nat4882_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_read,
+ .write = tnt4882_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_nec_isa_interface = {
+ .name = "ni_nec_isa",
+ .attach = ni_nec_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_read,
+ .write = tnt4882_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_isa_accel_interface = {
+ .name = "ni_isa_accel",
+ .attach = ni_tnt_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_nat4882_isa_accel_interface = {
+ .name = "ni_nat4882_isa_accel",
+ .attach = ni_nat4882_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response2 = tnt4882_serial_poll_response2,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_nec_isa_accel_interface = {
+ .name = "ni_nec_isa_accel",
+ .attach = ni_nec_isa_attach,
+ .detach = ni_isa_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command_unaccel,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = NULL,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
static const struct pci_device_id tnt4882_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_NATINST, PCI_DEVICE_ID_NI_GPIB)},
{PCI_DEVICE(PCI_VENDOR_ID_NATINST, PCI_DEVICE_ID_NI_GPIB_PLUS)},
@@ -1499,16 +1356,26 @@ static const struct pci_device_id tnt4882_pci_table[] = {
MODULE_DEVICE_TABLE(pci, tnt4882_pci_table);
static struct pci_driver tnt4882_pci_driver = {
- .name = "tnt4882",
+ .name = DRV_NAME,
.id_table = tnt4882_pci_table,
.probe = &tnt4882_pci_probe
};
+#if 0
+/* unused, will be needed when the driver is turned into a pnp_driver */
static const struct pnp_device_id tnt4882_pnp_table[] = {
{.id = "NICC601"},
{.id = ""}
};
MODULE_DEVICE_TABLE(pnp, tnt4882_pnp_table);
+#endif
+
+#ifdef CONFIG_GPIB_PCMCIA
+static gpib_interface_t ni_pcmcia_interface;
+static gpib_interface_t ni_pcmcia_accel_interface;
+static int __init init_ni_gpib_cs(void);
+static void __exit exit_ni_gpib_cs(void);
+#endif
static int __init tnt4882_init_module(void)
{
@@ -1516,84 +1383,83 @@ static int __init tnt4882_init_module(void)
result = pci_register_driver(&tnt4882_pci_driver);
if (result) {
- pr_err("tnt4882_gpib: pci_register_driver failed: error = %d\n", result);
+ pr_err("pci_register_driver failed: error = %d\n", result);
return result;
}
result = gpib_register_driver(&ni_isa_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_isa;
}
result = gpib_register_driver(&ni_isa_accel_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_isa_accel;
}
result = gpib_register_driver(&ni_nat4882_isa_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_nat4882_isa;
}
result = gpib_register_driver(&ni_nat4882_isa_accel_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_nat4882_isa_accel;
}
result = gpib_register_driver(&ni_nec_isa_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_nec_isa;
}
result = gpib_register_driver(&ni_nec_isa_accel_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_nec_isa_accel;
}
result = gpib_register_driver(&ni_pci_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pci;
}
result = gpib_register_driver(&ni_pci_accel_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pci_accel;
}
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
result = gpib_register_driver(&ni_pcmcia_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pcmcia;
}
result = gpib_register_driver(&ni_pcmcia_accel_interface, THIS_MODULE);
if (result) {
- pr_err("tnt4882_gpib: gpib_register_driver failed: error = %d\n", result);
+ pr_err("gpib_register_driver failed: error = %d\n", result);
goto err_pcmcia_accel;
}
result = init_ni_gpib_cs();
if (result) {
- pr_err("tnt4882_gpib: pcmcia_register_driver failed: error = %d\n", result);
+ pr_err("pcmcia_register_driver failed: error = %d\n", result);
goto err_pcmcia_driver;
}
#endif
mite_init();
- mite_list_devices();
return 0;
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
err_pcmcia_driver:
gpib_unregister_driver(&ni_pcmcia_accel_interface);
err_pcmcia_accel:
@@ -1631,7 +1497,7 @@ static void __exit tnt4882_exit_module(void)
gpib_unregister_driver(&ni_nec_isa_accel_interface);
gpib_unregister_driver(&ni_pci_interface);
gpib_unregister_driver(&ni_pci_accel_interface);
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
gpib_unregister_driver(&ni_pcmcia_interface);
gpib_unregister_driver(&ni_pcmcia_accel_interface);
exit_ni_gpib_cs();
@@ -1642,7 +1508,7 @@ static void __exit tnt4882_exit_module(void)
pci_unregister_driver(&tnt4882_pci_driver);
}
-#ifdef GPIB_PCMCIA
+#ifdef CONFIG_GPIB_PCMCIA
#include <linux/kernel.h>
#include <linux/moduleparam.h>
@@ -1655,29 +1521,9 @@ static void __exit tnt4882_exit_module(void)
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-/*
- * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
- * you do not define PCMCIA_DEBUG at all, all the debug code will be
- * left out. If you compile with PCMCIA_DEBUG=0, the debug code will
- * be present but disabled -- but it can then be enabled for specific
- * modules at load time with a 'pc_debug=#' option to insmod.
- */
-#define PCMCIA_DEBUG 1
-#ifdef PCMCIA_DEBUG
-static int pc_debug = PCMCIA_DEBUG;
-module_param(pc_debug, int, 0);
-#define DEBUG(n, args...) \
- do {if (pc_debug > (n)) \
- pr_debug(args); } \
- while (0)
-#else
-#define DEBUG(args...)
-#endif
-
static int ni_gpib_config(struct pcmcia_device *link);
static void ni_gpib_release(struct pcmcia_device *link);
-static int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config);
-static void ni_pcmcia_detach(gpib_board_t *board);
+static void ni_pcmcia_detach(struct gpib_board *board);
/*
* A linked list of "instances" of the dummy device. Each actual
@@ -1696,7 +1542,7 @@ static struct pcmcia_device *curr_dev;
struct local_info_t {
struct pcmcia_device *p_dev;
- gpib_board_t *dev;
+ struct gpib_board *dev;
int stop;
struct bus_operations *bus;
};
@@ -1710,9 +1556,7 @@ struct local_info_t {
static int ni_gpib_probe(struct pcmcia_device *link)
{
struct local_info_t *info;
- //struct gpib_board_t *dev;
-
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev;
/* Allocate space for private device-specific data */
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -1745,9 +1589,7 @@ static int ni_gpib_probe(struct pcmcia_device *link)
static void ni_gpib_remove(struct pcmcia_device *link)
{
struct local_info_t *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
-
- DEBUG(0, "%s(%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (info->dev)
ni_pcmcia_detach(info->dev);
@@ -1776,11 +1618,9 @@ static int ni_gpib_config_iteration(struct pcmcia_device *link, void *priv_data)
static int ni_gpib_config(struct pcmcia_device *link)
{
//struct local_info_t *info = link->priv;
- //gpib_board_t *dev = info->dev;
+ //struct gpib_board *dev = info->dev;
int last_ret;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
-
last_ret = pcmcia_loop_config(link, &ni_gpib_config_iteration, NULL);
if (last_ret) {
dev_warn(&link->dev, "no configuration found\n");
@@ -1803,18 +1643,16 @@ static int ni_gpib_config(struct pcmcia_device *link)
*/
static void ni_gpib_release(struct pcmcia_device *link)
{
- DEBUG(0, "%s(0x%p)\n", __func__, link);
pcmcia_disable_device(link);
} /* ni_gpib_release */
static int ni_gpib_suspend(struct pcmcia_device *link)
{
//struct local_info_t *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
if (link->open)
- pr_err("Device still open ???\n");
+ dev_warn(&link->dev, "Device still open\n");
//netif_device_detach(dev);
return 0;
@@ -1823,12 +1661,10 @@ static int ni_gpib_suspend(struct pcmcia_device *link)
static int ni_gpib_resume(struct pcmcia_device *link)
{
//struct local_info_t *info = link->priv;
- //struct gpib_board_t *dev = info->dev;
- DEBUG(0, "%s(0x%p)\n", __func__, link);
+ //struct gpib_board *dev = info->dev;
/*if (link->open) {
* ni_gpib_probe(dev); / really?
- * printk("Gpib resumed ???\n");
* //netif_device_attach(dev);
*}
*/
@@ -1854,32 +1690,28 @@ static struct pcmcia_driver ni_gpib_cs_driver = {
.resume = ni_gpib_resume,
};
-int __init init_ni_gpib_cs(void)
+static int __init init_ni_gpib_cs(void)
{
return pcmcia_register_driver(&ni_gpib_cs_driver);
}
-void __exit exit_ni_gpib_cs(void)
+static void __exit exit_ni_gpib_cs(void)
{
- DEBUG(0, "ni_gpib_cs: unloading\n");
pcmcia_unregister_driver(&ni_gpib_cs_driver);
}
static const int pcmcia_gpib_iosize = 32;
-int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
+static int ni_pcmcia_attach(struct gpib_board *board, const gpib_board_config_t *config)
{
struct local_info_t *info;
struct tnt4882_priv *tnt_priv;
struct nec7210_priv *nec_priv;
int isr_flags = IRQF_SHARED;
+ int retval;
- DEBUG(0, "%s(0x%p)\n", __func__, board);
-
- if (!curr_dev) {
- pr_err("gpib: no NI PCMCIA board found\n");
- return -1;
- }
+ if (!curr_dev)
+ return -ENODEV;
info = curr_dev->priv;
info->dev = board;
@@ -1888,6 +1720,7 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
if (tnt4882_allocate_private(board))
return -ENOMEM;
+
tnt_priv = board->private_data;
nec_priv = &tnt_priv->nec7210_priv;
nec_priv->type = TNT4882;
@@ -1895,23 +1728,20 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
nec_priv->write_byte = nec7210_locking_ioport_write_byte;
nec_priv->offset = atgpib_reg_offset;
- DEBUG(0, "ioport1 window attributes: 0x%lx\n", curr_dev->resource[0]->flags);
if (!request_region(curr_dev->resource[0]->start, resource_size(curr_dev->resource[0]),
- "tnt4882")) {
- pr_err("gpib: ioports starting at 0x%lx are already in use\n",
- (unsigned long)curr_dev->resource[0]->start);
- return -EIO;
- }
+ DRV_NAME))
+ return -ENOMEM;
nec_priv->mmiobase = ioport_map(curr_dev->resource[0]->start,
resource_size(curr_dev->resource[0]));
if (!nec_priv->mmiobase)
- return -1;
+ return -ENOMEM;
// get irq
- if (request_irq(curr_dev->irq, tnt4882_interrupt, isr_flags, "tnt4882", board)) {
- pr_err("gpib: can't request IRQ %d\n", curr_dev->irq);
- return -1;
+ retval = request_irq(curr_dev->irq, tnt4882_interrupt, isr_flags, DRV_NAME, board);
+ if (retval) {
+ dev_err(board->gpib_dev, "failed to obtain PCMCIA irq %d\n", curr_dev->irq);
+ return retval;
}
tnt_priv->irq = curr_dev->irq;
@@ -1920,13 +1750,11 @@ int ni_pcmcia_attach(gpib_board_t *board, const gpib_board_config_t *config)
return 0;
}
-void ni_pcmcia_detach(gpib_board_t *board)
+static void ni_pcmcia_detach(struct gpib_board *board)
{
struct tnt4882_priv *tnt_priv = board->private_data;
struct nec7210_priv *nec_priv;
- DEBUG(0, "%s(0x%p)\n", __func__, board);
-
if (tnt_priv) {
nec_priv = &tnt_priv->nec7210_priv;
if (tnt_priv->irq)
@@ -1941,7 +1769,63 @@ void ni_pcmcia_detach(gpib_board_t *board)
tnt4882_free_private(board);
}
-#endif // GPIB_PCMCIA
+static gpib_interface_t ni_pcmcia_interface = {
+ .name = "ni_pcmcia",
+ .attach = ni_pcmcia_attach,
+ .detach = ni_pcmcia_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+static gpib_interface_t ni_pcmcia_accel_interface = {
+ .name = "ni_pcmcia_accel",
+ .attach = ni_pcmcia_attach,
+ .detach = ni_pcmcia_detach,
+ .read = tnt4882_accel_read,
+ .write = tnt4882_accel_write,
+ .command = tnt4882_command,
+ .take_control = tnt4882_take_control,
+ .go_to_standby = tnt4882_go_to_standby,
+ .request_system_control = tnt4882_request_system_control,
+ .interface_clear = tnt4882_interface_clear,
+ .remote_enable = tnt4882_remote_enable,
+ .enable_eos = tnt4882_enable_eos,
+ .disable_eos = tnt4882_disable_eos,
+ .parallel_poll = tnt4882_parallel_poll,
+ .parallel_poll_configure = tnt4882_parallel_poll_configure,
+ .parallel_poll_response = tnt4882_parallel_poll_response,
+ .local_parallel_poll_mode = NULL, // XXX
+ .line_status = tnt4882_line_status,
+ .update_status = tnt4882_update_status,
+ .primary_address = tnt4882_primary_address,
+ .secondary_address = tnt4882_secondary_address,
+ .serial_poll_response = tnt4882_serial_poll_response,
+ .serial_poll_status = tnt4882_serial_poll_status,
+ .t1_delay = tnt4882_t1_delay,
+ .return_to_local = tnt4882_return_to_local,
+};
+
+#endif // CONFIG_GPIB_PCMCIA
module_init(tnt4882_init_module);
module_exit(tnt4882_exit_module);
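The init path above registers many interfaces in sequence and, when a later registration fails, unwinds the earlier ones in reverse through the goto ladder. A minimal, self-contained sketch of that unwind idiom, with hypothetical register_a()/register_b()/register_c() standing in for the gpib_register_driver() calls (not part of the driver itself):

    /* Sketch of the register/unwind idiom used in tnt4882_init_module() above.
     * Only the ordering discipline matters: undo registrations in reverse
     * order, starting from the label matching the step that failed. */
    #include <stdio.h>

    static int register_a(void) { return 0; }   /* stand-ins for gpib_register_driver() */
    static int register_b(void) { return 0; }
    static int register_c(void) { return -1; }  /* pretend the third step fails */
    static void unregister_a(void) { puts("undo a"); }
    static void unregister_b(void) { puts("undo b"); }

    static int init_module_sketch(void)
    {
        int result;

        result = register_a();
        if (result)
            return result;

        result = register_b();
        if (result)
            goto err_a;

        result = register_c();
        if (result)
            goto err_b;

        return 0;

    err_b:
        unregister_b();
    err_a:
        unregister_a();
        return result;
    }

    int main(void)
    {
        return init_module_sketch() ? 1 : 0;
    }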
diff --git a/drivers/staging/gpib/uapi/gpib_user.h b/drivers/staging/gpib/uapi/gpib_user.h
index 0896a55a758f..5ff4588686fd 100644
--- a/drivers/staging/gpib/uapi/gpib_user.h
+++ b/drivers/staging/gpib/uapi/gpib_user.h
@@ -106,26 +106,15 @@ enum eos_flags {
/* GPIB Bus Control Lines bit vector */
enum bus_control_line {
- ValidDAV = 0x01,
- ValidNDAC = 0x02,
- ValidNRFD = 0x04,
- ValidIFC = 0x08,
- ValidREN = 0x10,
- ValidSRQ = 0x20,
- ValidATN = 0x40,
- ValidEOI = 0x80,
- ValidALL = 0xff,
- BusDAV = 0x0100, /* DAV line status bit */
- BusNDAC = 0x0200, /* NDAC line status bit */
- BusNRFD = 0x0400, /* NRFD line status bit */
- BusIFC = 0x0800, /* IFC line status bit */
- BusREN = 0x1000, /* REN line status bit */
- BusSRQ = 0x2000, /* SRQ line status bit */
- BusATN = 0x4000, /* ATN line status bit */
- BusEOI = 0x8000 /* EOI line status bit */
-};
-
-enum old_bus_control_line {
+ VALID_DAV = 0x01,
+ VALID_NDAC = 0x02,
+ VALID_NRFD = 0x04,
+ VALID_IFC = 0x08,
+ VALID_REN = 0x10,
+ VALID_SRQ = 0x20,
+ VALID_ATN = 0x40,
+ VALID_EOI = 0x80,
+ VALID_ALL = 0xff,
BUS_DAV = 0x0100, /* DAV line status bit */
BUS_NDAC = 0x0200, /* NDAC line status bit */
BUS_NRFD = 0x0400, /* NRFD line status bit */
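The renamed constants above keep the split the old names already implied: the low byte (VALID_*) reports which bus control lines the driver could actually sample, and the high byte (BUS_*) carries the sampled line states. A small sketch of decoding one line under that reading; the two defines are copied from the enum so the snippet is self-contained, and the status word in main() is made up:

    #include <stdio.h>

    #define VALID_SRQ 0x20      /* driver can report the SRQ line */
    #define BUS_SRQ   0x2000    /* SRQ line status bit */

    /* Returns 1 if SRQ is asserted, 0 if not, -1 if the driver
     * could not sample the SRQ line at all. */
    static int srq_asserted(int line_status)
    {
        if (!(line_status & VALID_SRQ))
            return -1;
        return (line_status & BUS_SRQ) ? 1 : 0;
    }

    int main(void)
    {
        printf("%d\n", srq_asserted(0x20ff));   /* hypothetical status word */
        return 0;
    }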
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 8eab94cb06fa..308ed1ca9947 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -948,7 +948,8 @@ static int gb_tty_init(void)
{
int retval = 0;
- gb_tty_driver = tty_alloc_driver(GB_NUM_MINORS, 0);
+ gb_tty_driver = tty_alloc_driver(GB_NUM_MINORS, TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(gb_tty_driver)) {
pr_err("Can not allocate tty driver\n");
retval = -ENOMEM;
@@ -961,7 +962,6 @@ static int gb_tty_init(void)
gb_tty_driver->minor_start = 0;
gb_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
gb_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- gb_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
gb_tty_driver->init_termios = tty_std_termios;
gb_tty_driver->init_termios.c_cflag = B9600 | CS8 |
CREAD | HUPCL | CLOCAL;
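The greybus hunk above passes TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV directly to tty_alloc_driver() instead of assigning ->flags after allocation. A rough sketch of that allocation pattern, assuming the usual tty_alloc_driver()/IS_ERR() flow; EXAMPLE_MINORS, "example_tty" and "ttyEX" are illustrative names, not part of the driver:

    #include <linux/tty.h>
    #include <linux/tty_driver.h>
    #include <linux/err.h>

    #define EXAMPLE_MINORS 8

    static struct tty_driver *example_alloc(void)
    {
        struct tty_driver *drv;

        /* flags go into the allocator; no later drv->flags assignment */
        drv = tty_alloc_driver(EXAMPLE_MINORS,
                               TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
        if (IS_ERR(drv))
            return NULL;

        drv->driver_name = "example_tty";
        drv->name = "ttyEX";
        drv->type = TTY_DRIVER_TYPE_SERIAL;
        drv->subtype = SERIAL_TYPE_NORMAL;
        drv->init_termios = tty_std_termios;

        return drv;
    }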
diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig
index 8d48c61961a6..353e6ee2c145 100644
--- a/drivers/staging/rtl8723bs/Kconfig
+++ b/drivers/staging/rtl8723bs/Kconfig
@@ -4,6 +4,7 @@ config RTL8723BS
depends on WLAN && MMC && CFG80211
depends on m
select CRYPTO
+ select CRYPTO_LIB_AES
select CRYPTO_LIB_ARC4
help
This option enables support for RTL8723BS SDIO drivers, such as
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index a6dc88dd4ba1..50022bb5911e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -324,7 +324,7 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
{
unsigned char sta_band = 0, shortGIrate = false;
unsigned int tx_ra_bitmap = 0;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex
*pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
@@ -372,9 +372,9 @@ void update_bmc_sta(struct adapter *padapter)
unsigned char network_type;
int supportRateNum = 0;
unsigned int tx_ra_bitmap = 0;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex
*pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
struct sta_info *psta = rtw_get_bcmc_stainfo(padapter);
@@ -451,9 +451,9 @@ void update_bmc_sta(struct adapter *padapter)
void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
{
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
struct ht_priv *phtpriv_sta = &psta->htpriv;
u8 cur_ldpc_cap = 0, cur_stbc_cap = 0, cur_beamform_cap = 0;
@@ -563,10 +563,10 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
static void update_ap_info(struct adapter *padapter, struct sta_info *psta)
{
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex
*pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
psta->wireless_mode = pmlmeext->cur_wireless_mode;
@@ -609,7 +609,7 @@ static void update_hw_ht_param(struct adapter *padapter)
unsigned char max_AMPDU_len;
unsigned char min_MPDU_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
/* handle A-MPDU parameter field
*
@@ -645,13 +645,13 @@ void start_bss_network(struct adapter *padapter)
u32 acparm;
int ie_len;
struct registry_priv *pregpriv = &padapter->registrypriv;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
struct wlan_bssid_ex
*pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
struct HT_info_element *pht_info = NULL;
u8 cbw40_enable = 0;
@@ -823,7 +823,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
u8 WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex
*pbss_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
u8 *ie = pbss_network->ies;
@@ -845,7 +845,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pbss_network->rssi = 0;
- memcpy(pbss_network->mac_address, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pbss_network->mac_address, myid(&padapter->eeprompriv), ETH_ALEN);
/* beacon interval */
p = rtw_get_beacon_interval_from_ie(ie);/* ie + 8; 8: TimeStamp, 2: Beacon Interval 2:Capability */
@@ -1186,7 +1186,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
if ((NUM_ACL - 1) < pacl_list->num)
return (-1);
- spin_lock_bh(&(pacl_node_q->lock));
+ spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
list_for_each(plist, phead) {
@@ -1200,12 +1200,12 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
}
}
- spin_unlock_bh(&(pacl_node_q->lock));
+ spin_unlock_bh(&pacl_node_q->lock);
if (added)
return ret;
- spin_lock_bh(&(pacl_node_q->lock));
+ spin_lock_bh(&pacl_node_q->lock);
for (i = 0; i < NUM_ACL; i++) {
paclnode = &pacl_list->aclnode[i];
@@ -1225,7 +1225,7 @@ int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
}
}
- spin_unlock_bh(&(pacl_node_q->lock));
+ spin_unlock_bh(&pacl_node_q->lock);
return ret;
}
@@ -1238,7 +1238,7 @@ void rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
- spin_lock_bh(&(pacl_node_q->lock));
+ spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
list_for_each_safe(plist, tmp, phead) {
@@ -1258,7 +1258,7 @@ void rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
}
}
- spin_unlock_bh(&(pacl_node_q->lock));
+ spin_unlock_bh(&pacl_node_q->lock);
}
@@ -1308,7 +1308,7 @@ static int rtw_ap_set_key(
u8 keylen;
struct cmd_obj *pcmd;
struct setkey_parm *psetkeyparm;
- struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
int res = _SUCCESS;
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
@@ -1345,7 +1345,7 @@ static int rtw_ap_set_key(
keylen = 16;
}
- memcpy(&(psetkeyparm->key[0]), key, keylen);
+ memcpy(&psetkeyparm->key[0], key, keylen);
pcmd->cmdcode = _SetKey_CMD_;
pcmd->parmbuf = (u8 *)psetkeyparm;
@@ -1397,10 +1397,10 @@ static void update_bcn_fixed_ie(struct adapter *padapter)
static void update_bcn_erpinfo_ie(struct adapter *padapter)
{
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
unsigned char *p, *ie = pnetwork->ies;
u32 len = 0;
@@ -1461,10 +1461,10 @@ static void update_bcn_wps_ie(struct adapter *padapter)
u8 *pbackup_remainder_ie = NULL;
uint wps_ielen = 0, wps_offset, remainder_ielen;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
unsigned char *ie = pnetwork->ies;
u32 ielen = pnetwork->ie_length;
@@ -1537,8 +1537,8 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
if (!padapter)
return;
- pmlmepriv = &(padapter->mlmepriv);
- pmlmeext = &(padapter->mlmeextpriv);
+ pmlmepriv = &padapter->mlmepriv;
+ pmlmeext = &padapter->mlmeextpriv;
/* pmlmeinfo = &(pmlmeext->mlmext_info); */
if (!pmlmeext->bstart_bss)
@@ -1619,7 +1619,7 @@ static int rtw_ht_operation_update(struct adapter *padapter)
{
u16 cur_op_mode, new_op_mode;
int op_mode_changes = 0;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
if (pmlmepriv->htpriv.ht_option)
@@ -1703,8 +1703,8 @@ void associated_clients_update(struct adapter *padapter, u8 updated)
void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
{
u8 beacon_updated = false;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE)) {
if (!psta->no_short_preamble_set) {
@@ -1823,8 +1823,8 @@ void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
{
u8 beacon_updated = false;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
if (!psta)
return beacon_updated;
@@ -1932,7 +1932,7 @@ void rtw_sta_flush(struct adapter *padapter)
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
@@ -1962,7 +1962,7 @@ void rtw_sta_flush(struct adapter *padapter)
void sta_info_update(struct adapter *padapter, struct sta_info *psta)
{
int flags = psta->flags;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
/* update wmm cap. */
if (WLAN_STA_WME & flags)
@@ -1991,7 +1991,7 @@ void sta_info_update(struct adapter *padapter, struct sta_info *psta)
void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (psta->state & _FW_LINKED) {
pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
@@ -2006,7 +2006,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta;
- struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
struct list_head *phead, *plist;
u8 chk_alive_num = 0;
char chk_alive_list[NUM_STA];
@@ -2072,7 +2072,7 @@ void rtw_ap_restore_network(struct adapter *padapter)
void start_ap_mode(struct adapter *padapter)
{
int i;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
@@ -2109,7 +2109,7 @@ void start_ap_mode(struct adapter *padapter)
pmlmepriv->p2p_probe_resp_ie = NULL;
/* for ACL */
- INIT_LIST_HEAD(&(pacl_list->acl_node_q.queue));
+ INIT_LIST_HEAD(&pacl_list->acl_node_q.queue);
pacl_list->num = 0;
pacl_list->mode = 0;
for (i = 0; i < NUM_ACL; i++) {
@@ -2124,7 +2124,7 @@ void stop_ap_mode(struct adapter *padapter)
struct rtw_wlan_acl_node *paclnode;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
@@ -2142,7 +2142,7 @@ void stop_ap_mode(struct adapter *padapter)
padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
/* for ACL */
- spin_lock_bh(&(pacl_node_q->lock));
+ spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
list_for_each_safe(plist, tmp, phead) {
paclnode = list_entry(plist, struct rtw_wlan_acl_node, list);
@@ -2155,7 +2155,7 @@ void stop_ap_mode(struct adapter *padapter)
pacl_list->num--;
}
}
- spin_unlock_bh(&(pacl_node_q->lock));
+ spin_unlock_bh(&pacl_node_q->lock);
rtw_sta_flush(padapter);
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index b41ec89932af..1213a91cffff 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -884,6 +884,9 @@ static u32 Array_kfreemap[] = {
0xfc, 0x0,
};
+#define REG_RF_BB_GAIN_OFFSET 0x7f
+//#define RF_GAIN_OFFSET_MASK 0xfffff
+
void rtw_bb_rf_gain_offset(struct adapter *padapter)
{
u8 value = padapter->eeprompriv.EEPROMRFGainOffset;
diff --git a/drivers/staging/rtl8723bs/include/osdep_intf.h b/drivers/staging/rtl8723bs/include/osdep_intf.h
index 73199be78139..83a25598e962 100644
--- a/drivers/staging/rtl8723bs/include/osdep_intf.h
+++ b/drivers/staging/rtl8723bs/include/osdep_intf.h
@@ -8,33 +8,6 @@
#ifndef __OSDEP_INTF_H_
#define __OSDEP_INTF_H_
-
-struct intf_priv {
-
- u8 *intf_dev;
- u32 max_iosz; /* USB2.0: 128, USB1.1: 64, SDIO:64 */
- u32 max_xmitsz; /* USB2.0: unlimited, SDIO:512 */
- u32 max_recvsz; /* USB2.0: unlimited, SDIO:512 */
-
- volatile u8 *io_rwmem;
- volatile u8 *allocated_io_rwmem;
- u32 io_wsz; /* unit: 4bytes */
- u32 io_rsz;/* unit: 4bytes */
- u8 intf_status;
-
- void (*_bus_io)(u8 *priv);
-
-/*
-Under Sync. IRP (SDIO/USB)
-A protection mechanism is necessary for the io_rwmem(read/write protocol)
-
-Under Async. IRP (SDIO/USB)
-The protection mechanism is through the pending queue.
-*/
-
- struct mutex ioctl_mutex;
-};
-
struct dvobj_priv *devobj_init(void);
void devobj_deinit(struct dvobj_priv *pdvobj);
diff --git a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
index e6d6e9de5474..a4a14474c35d 100644
--- a/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
+++ b/drivers/staging/rtl8723bs/include/rtl8723b_hal.h
@@ -15,7 +15,6 @@
#include "rtl8723b_recv.h"
#include "rtl8723b_xmit.h"
#include "rtl8723b_cmd.h"
-#include "rtw_mp.h"
#include "hal_pwr_seq.h"
#include "Hal8192CPhyReg.h"
#include "hal_phy_cfg.h"
diff --git a/drivers/staging/rtl8723bs/include/rtw_io.h b/drivers/staging/rtl8723bs/include/rtw_io.h
index 0ee87be6dc4f..adf1de4d7924 100644
--- a/drivers/staging/rtl8723bs/include/rtw_io.h
+++ b/drivers/staging/rtl8723bs/include/rtw_io.h
@@ -8,16 +8,7 @@
#ifndef _RTW_IO_H_
#define _RTW_IO_H_
-/*
- For prompt mode accessing, caller shall free io_req
- Otherwise, io_handler will free io_req
-*/
-
-/* below is for the intf_option bit definition... */
-
-struct intf_priv;
struct intf_hdl;
-struct io_queue;
struct _io_ops {
u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
@@ -36,8 +27,6 @@ struct _io_ops {
void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
- void (*_sync_irp_protocol_rw)(struct io_queue *pio_q);
-
u32 (*_read_interrupt)(struct intf_hdl *pintfhdl, u32 addr);
u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
@@ -49,18 +38,6 @@ struct _io_ops {
void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
};
-struct io_req {
- struct list_head list;
- u32 addr;
- volatile u32 val;
- u32 command;
- u32 status;
- u8 *pbuf;
-
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt);
- u8 *cnxt;
-};
-
struct intf_hdl {
struct adapter *padapter;
struct dvobj_priv *pintf_dev;/* pointer to &(padapter->dvobjpriv); */
@@ -74,21 +51,6 @@ struct intf_hdl {
int rtw_inc_and_chk_continual_io_error(struct dvobj_priv *dvobj);
void rtw_reset_continual_io_error(struct dvobj_priv *dvobj);
-/*
-Below is the data structure used by _io_handler
-
-*/
-
-struct io_queue {
- spinlock_t lock;
- struct list_head free_ioreqs;
- struct list_head pending; /* The io_req list that will be served in the single protocol read/write. */
- struct list_head processing;
- u8 *free_ioreqs_buf; /* 4-byte aligned */
- u8 *pallocated_free_ioreqs_buf;
- struct intf_hdl intf;
-};
-
struct io_priv {
struct adapter *padapter;
@@ -97,20 +59,6 @@ struct io_priv {
};
-extern uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-extern void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue);
-extern uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-
-
-extern uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
-extern struct io_req *alloc_ioreq(struct io_queue *pio_q);
-
-extern uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl);
-extern void unregister_intf_hdl(struct intf_hdl *pintfhdl);
-
-extern void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-extern void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
extern u8 rtw_read8(struct adapter *adapter, u32 addr);
extern u16 rtw_read16(struct adapter *adapter, u32 addr);
extern u32 rtw_read32(struct adapter *adapter, u32 addr);
@@ -121,46 +69,6 @@ extern int rtw_write32(struct adapter *adapter, u32 addr, u32 val);
extern u32 rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-extern void rtw_write_scsi(struct adapter *adapter, u32 cnt, u8 *pmem);
-
-/* ioreq */
-extern void ioreq_read8(struct adapter *adapter, u32 addr, u8 *pval);
-extern void ioreq_read16(struct adapter *adapter, u32 addr, u16 *pval);
-extern void ioreq_read32(struct adapter *adapter, u32 addr, u32 *pval);
-extern void ioreq_write8(struct adapter *adapter, u32 addr, u8 val);
-extern void ioreq_write16(struct adapter *adapter, u32 addr, u16 val);
-extern void ioreq_write32(struct adapter *adapter, u32 addr, u32 val);
-
-
-extern uint async_read8(struct adapter *adapter, u32 addr, u8 *pbuff,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-extern uint async_read16(struct adapter *adapter, u32 addr, u8 *pbuff,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-extern uint async_read32(struct adapter *adapter, u32 addr, u8 *pbuff,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-
-extern void async_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-extern void async_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
-extern void async_write8(struct adapter *adapter, u32 addr, u8 val,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-extern void async_write16(struct adapter *adapter, u32 addr, u16 val,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-extern void async_write32(struct adapter *adapter, u32 addr, u32 val,
- void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
-
-extern void async_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-extern void async_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
-
int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct adapter *padapter, struct _io_ops *pops));
-
-extern uint alloc_io_queue(struct adapter *adapter);
-extern void free_io_queue(struct adapter *adapter);
-extern void async_bus_io(struct io_queue *pio_q);
-extern void bus_sync_io(struct io_queue *pio_q);
-extern u32 _ioreq2rwmem(struct io_queue *pio_q);
-extern void dev_power_down(struct adapter *Adapter, u8 bpwrup);
-
#endif /* _RTL8711_IO_H_ */
diff --git a/drivers/staging/rtl8723bs/include/rtw_mp.h b/drivers/staging/rtl8723bs/include/rtw_mp.h
deleted file mode 100644
index 5a1cbd2ed851..000000000000
--- a/drivers/staging/rtl8723bs/include/rtw_mp.h
+++ /dev/null
@@ -1,341 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef _RTW_MP_H_
-#define _RTW_MP_H_
-
-#define MAX_MP_XMITBUF_SZ 2048
-
-struct mp_xmit_frame {
- struct list_head list;
-
- struct pkt_attrib attrib;
-
- struct sk_buff *pkt;
-
- int frame_tag;
-
- struct adapter *padapter;
-
- uint mem[(MAX_MP_XMITBUF_SZ >> 2)];
-};
-
-struct mp_wiparam {
- u32 bcompleted;
- u32 act_type;
- u32 io_offset;
- u32 io_value;
-};
-
-struct mp_tx {
- u8 stop;
- u32 count, sended;
- u8 payload;
- struct pkt_attrib attrib;
- /* struct tx_desc desc; */
- /* u8 resvdtx[7]; */
- u8 desc[TXDESC_SIZE];
- u8 *pallocated_buf;
- u8 *buf;
- u32 buf_size, write_size;
- void *PktTxThread;
-};
-
-#define MP_MAX_LINES 1000
-#define MP_MAX_LINES_BYTES 256
-
-typedef void (*MPT_WORK_ITEM_HANDLER)(void *Adapter);
-struct mpt_context {
- /* Indicate if we have started Mass Production Test. */
- bool bMassProdTest;
-
- /* Indicate if the driver is unloading or unloaded. */
- bool bMptDrvUnload;
-
- struct timer_list MPh2c_timeout_timer;
-/* Event used to sync H2c for BT control */
-
- bool MptH2cRspEvent;
- bool MptBtC2hEvent;
- bool bMPh2c_timeout;
-
- /* 8190 PCI does not support NDIS_WORK_ITEM. */
- /* Work Item for Mass Production Test. */
- /* NDIS_WORK_ITEM MptWorkItem; */
-/* RT_WORK_ITEM MptWorkItem; */
- /* Event used to sync the case unloading driver and MptWorkItem is still in progress. */
-/* NDIS_EVENT MptWorkItemEvent; */
- /* To protect the following variables. */
-/* NDIS_SPIN_LOCK MptWorkItemSpinLock; */
- /* Indicate a MptWorkItem is scheduled and not yet finished. */
- bool bMptWorkItemInProgress;
- /* An instance which implements function and context of MptWorkItem. */
- MPT_WORK_ITEM_HANDLER CurrMptAct;
-
- /* 1 =Start, 0 =Stop from UI. */
- u32 MptTestStart;
- /* _TEST_MODE, defined in MPT_Req2.h */
- u32 MptTestItem;
- /* Variable needed in each implementation of CurrMptAct. */
- u32 MptActType; /* Type of action performed in CurrMptAct. */
- /* The Offset of IO operation is depend of MptActType. */
- u32 MptIoOffset;
- /* The Value of IO operation is depend of MptActType. */
- u32 MptIoValue;
- /* The RfPath of IO operation is depend of MptActType. */
- u32 MptRfPath;
-
- enum wireless_mode MptWirelessModeToSw; /* Wireless mode to switch. */
- u8 MptChannelToSw; /* Channel to switch. */
- u8 MptInitGainToSet; /* Initial gain to set. */
- u32 MptBandWidth; /* bandwidth to switch. */
- u32 MptRateIndex; /* rate index. */
- /* Register value kept for Single Carrier Tx test. */
- u8 btMpCckTxPower;
- /* Register value kept for Single Carrier Tx test. */
- u8 btMpOfdmTxPower;
- /* For MP Tx Power index */
- u8 TxPwrLevel[2]; /* rf-A, rf-B */
- u32 RegTxPwrLimit;
- /* Content of RCR Register for Mass Production Test. */
- u32 MptRCR;
- /* true if we only receive packets with specific pattern. */
- bool bMptFilterPattern;
- /* Rx OK count, statistics used in Mass Production Test. */
- u32 MptRxOkCnt;
- /* Rx CRC32 error count, statistics used in Mass Production Test. */
- u32 MptRxCrcErrCnt;
-
- bool bCckContTx; /* true if we are in CCK Continuous Tx test. */
- bool bOfdmContTx; /* true if we are in OFDM Continuous Tx test. */
- bool bStartContTx; /* true if we have start Continuous Tx test. */
- /* true if we are in Single Carrier Tx test. */
- bool bSingleCarrier;
- /* true if we are in Carrier Suppression Tx Test. */
- bool bCarrierSuppression;
- /* true if we are in Single Tone Tx test. */
- bool bSingleTone;
-
- /* ACK counter asked by K.Y.. */
- bool bMptEnableAckCounter;
- u32 MptAckCounter;
-
- /* SD3 Willis For 8192S to save 1T/2T RF table for ACUT Only fro ACUT delete later ~~~! */
- /* s8 BufOfLines[2][MAX_LINES_HWCONFIG_TXT][MAX_BYTES_LINE_HWCONFIG_TXT]; */
- /* s8 BufOfLines[2][MP_MAX_LINES][MP_MAX_LINES_BYTES]; */
- /* s32 RfReadLine[2]; */
-
- u8 APK_bound[2]; /* for APK path A/path B */
- bool bMptIndexEven;
-
- u8 backup0xc50;
- u8 backup0xc58;
- u8 backup0xc30;
- u8 backup0x52_RF_A;
- u8 backup0x52_RF_B;
-
- u32 backup0x58_RF_A;
- u32 backup0x58_RF_B;
-
- u8 h2cReqNum;
- u8 c2hBuf[32];
-
- u8 btInBuf[100];
- u32 mptOutLen;
- u8 mptOutBuf[100];
-
-};
-/* endif */
-
-/* define RTPRIV_IOCTL_MP (SIOCIWFIRSTPRIV + 0x17) */
-enum {
- WRITE_REG = 1,
- READ_REG,
- WRITE_RF,
- READ_RF,
- MP_START,
- MP_STOP,
- MP_RATE,
- MP_CHANNEL,
- MP_BANDWIDTH,
- MP_TXPOWER,
- MP_ANT_TX,
- MP_ANT_RX,
- MP_CTX,
- MP_QUERY,
- MP_ARX,
- MP_PSD,
- MP_PWRTRK,
- MP_THER,
- MP_IOCTL,
- EFUSE_GET,
- EFUSE_SET,
- MP_RESET_STATS,
- MP_DUMP,
- MP_PHYPARA,
- MP_SetRFPathSwh,
- MP_QueryDrvStats,
- MP_SetBT,
- CTA_TEST,
- MP_DISABLE_BT_COEXIST,
- MP_PwrCtlDM,
- MP_NULL,
- MP_GET_TXPOWER_INX,
-};
-
-struct mp_priv {
- struct adapter *papdater;
-
- /* Testing Flag */
- u32 mode;/* 0 for normal type packet, 1 for loopback packet (16bytes TXCMD) */
-
- u32 prev_fw_state;
-
- /* OID cmd handler */
- struct mp_wiparam workparam;
-/* u8 act_in_progress; */
-
- /* Tx Section */
- u8 TID;
- u32 tx_pktcount;
- u32 pktInterval;
- struct mp_tx tx;
-
- /* Rx Section */
- u32 rx_bssidpktcount;
- u32 rx_pktcount;
- u32 rx_pktcount_filter_out;
- u32 rx_crcerrpktcount;
- u32 rx_pktloss;
- bool rx_bindicatePkt;
- struct recv_stat rxstat;
-
- /* RF/BB relative */
- u8 channel;
- u8 bandwidth;
- u8 prime_channel_offset;
- u8 txpoweridx;
- u8 txpoweridx_b;
- u8 rateidx;
- u32 preamble;
-/* u8 modem; */
- u32 CrystalCap;
-/* u32 curr_crystalcap; */
-
- u16 antenna_tx;
- u16 antenna_rx;
-/* u8 curr_rfpath; */
-
- u8 check_mp_pkt;
-
- u8 bSetTxPower;
-/* uint ForcedDataRate; */
- u8 mp_dm;
- u8 mac_filter[ETH_ALEN];
- u8 bmac_filter;
-
- struct wlan_network mp_network;
- NDIS_802_11_MAC_ADDRESS network_macaddr;
-
- u8 *pallocated_mp_xmitframe_buf;
- u8 *pmp_xmtframe_buf;
- struct __queue free_mp_xmitqueue;
- u32 free_mp_xmitframe_cnt;
- bool bSetRxBssid;
- bool bTxBufCkFail;
-
- struct mpt_context MptCtx;
-
- u8 *TXradomBuffer;
-};
-
-/* Hardware Registers */
-extern u8 mpdatarate[NumRates];
-
-#define MAX_TX_PWR_INDEX_N_MODE 64 /* 0x3F */
-
-#define REG_RF_BB_GAIN_OFFSET 0x7f
-#define RF_GAIN_OFFSET_MASK 0xfffff
-
-/* */
-/* struct mp_xmit_frame *alloc_mp_xmitframe(struct mp_priv *pmp_priv); */
-/* int free_mp_xmitframe(struct xmit_priv *pxmitpriv, struct mp_xmit_frame *pmp_xmitframe); */
-
-s32 init_mp_priv(struct adapter *padapter);
-void free_mp_priv(struct mp_priv *pmp_priv);
-s32 MPT_InitializeAdapter(struct adapter *padapter, u8 Channel);
-void MPT_DeInitAdapter(struct adapter *padapter);
-s32 mp_start_test(struct adapter *padapter);
-void mp_stop_test(struct adapter *padapter);
-
-u32 _read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask);
-void _write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask, u32 val);
-
-u32 read_macreg(struct adapter *padapter, u32 addr, u32 sz);
-void write_macreg(struct adapter *padapter, u32 addr, u32 val, u32 sz);
-
-void SetChannel(struct adapter *padapter);
-void SetBandwidth(struct adapter *padapter);
-int SetTxPower(struct adapter *padapter);
-void SetAntennaPathPower(struct adapter *padapter);
-void SetDataRate(struct adapter *padapter);
-
-void SetAntenna(struct adapter *padapter);
-
-s32 SetThermalMeter(struct adapter *padapter, u8 target_ther);
-void GetThermalMeter(struct adapter *padapter, u8 *value);
-
-void SetContinuousTx(struct adapter *padapter, u8 bStart);
-void SetSingleCarrierTx(struct adapter *padapter, u8 bStart);
-void SetSingleToneTx(struct adapter *padapter, u8 bStart);
-void SetCarrierSuppressionTx(struct adapter *padapter, u8 bStart);
-void PhySetTxPowerLevel(struct adapter *padapter);
-
-void fill_txdesc_for_mp(struct adapter *padapter, u8 *ptxdesc);
-void SetPacketTx(struct adapter *padapter);
-void SetPacketRx(struct adapter *padapter, u8 bStartRx);
-
-void ResetPhyRxPktCount(struct adapter *padapter);
-u32 GetPhyRxPktReceived(struct adapter *padapter);
-u32 GetPhyRxPktCRC32Error(struct adapter *padapter);
-
-s32 SetPowerTracking(struct adapter *padapter, u8 enable);
-void GetPowerTracking(struct adapter *padapter, u8 *enable);
-
-u32 mp_query_psd(struct adapter *padapter, u8 *data);
-
-void Hal_SetAntenna(struct adapter *padapter);
-void Hal_SetBandwidth(struct adapter *padapter);
-
-void Hal_SetTxPower(struct adapter *padapter);
-void Hal_SetCarrierSuppressionTx(struct adapter *padapter, u8 bStart);
-void Hal_SetSingleToneTx(struct adapter *padapter, u8 bStart);
-void Hal_SetSingleCarrierTx(struct adapter *padapter, u8 bStart);
-void Hal_SetContinuousTx(struct adapter *padapter, u8 bStart);
-
-void Hal_SetDataRate(struct adapter *padapter);
-void Hal_SetChannel(struct adapter *padapter);
-void Hal_SetAntennaPathPower(struct adapter *padapter);
-s32 Hal_SetThermalMeter(struct adapter *padapter, u8 target_ther);
-s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable);
-void Hal_GetPowerTracking(struct adapter *padapter, u8 *enable);
-void Hal_GetThermalMeter(struct adapter *padapter, u8 *value);
-void Hal_mpt_SwitchRfSetting(struct adapter *padapter);
-void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14);
-void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *padapter, bool beven);
-void Hal_SetCCKTxPower(struct adapter *padapter, u8 *TxPower);
-void Hal_SetOFDMTxPower(struct adapter *padapter, u8 *TxPower);
-void Hal_TriggerRFThermalMeter(struct adapter *padapter);
-u8 Hal_ReadRFThermalMeter(struct adapter *padapter);
-void Hal_SetCCKContinuousTx(struct adapter *padapter, u8 bStart);
-void Hal_SetOFDMContinuousTx(struct adapter *padapter, u8 bStart);
-void Hal_ProSetCrystalCap(struct adapter *padapter, u32 CrystalCapVal);
-void MP_PHY_SetRFPathSwitch(struct adapter *padapter, bool bMain);
-u32 mpt_ProQueryCalTxPower(struct adapter *padapter, u8 RfPath);
-void MPT_PwrCtlDM(struct adapter *padapter, u32 bstart);
-u8 MptToMgntRate(u32 MptRateIdx);
-
-#endif /* _RTW_MP_H_ */
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 738a601c55bb..de48c3454ab3 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -724,8 +724,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
rtw_free_mlme_priv(&padapter->mlmepriv);
- /* free_io_queue(padapter); */
-
_rtw_free_xmit_priv(&padapter->xmitpriv);
_rtw_free_sta_priv(&padapter->stapriv); /* will free bcmc_stainfo here */
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 02860d3ec365..025dae3756aa 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -228,8 +228,8 @@ int ddk750_init_hw(struct initchip_param *p_init_param)
reg = peek32(VGA_CONFIGURATION);
reg |= (VGA_CONFIGURATION_PLL | VGA_CONFIGURATION_MODE);
poke32(VGA_CONFIGURATION, reg);
+#ifdef CONFIG_X86
} else {
-#if defined(__i386__) || defined(__x86_64__)
/* set graphic mode via IO method */
outb_p(0x88, 0x3d4);
outb_p(0x06, 0x3d5);
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index deec33f63bcf..b839b50ac26a 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -658,8 +658,6 @@ static const struct vb2_ops bcm2835_mmal_video_qops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------
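The bcm2835-camera hunk drops the boilerplate .wait_prepare/.wait_finish hooks; as far as this edit understands, the videobuf2 core releases and re-takes the queue's lock itself whenever vb2_queue->lock is populated, which is what makes those hooks redundant. A sketch of the corresponding queue setup, with a hypothetical example_lock/example_queue_init():

    #include <media/videobuf2-v4l2.h>
    #include <linux/mutex.h>

    static struct mutex example_lock;

    static int example_queue_init(struct vb2_queue *q)
    {
        mutex_init(&example_lock);
        /* With q->lock set, the core can serialise queue ops and drop the
         * lock around blocking waits without driver-supplied hooks. */
        q->lock = &example_lock;
        return vb2_queue_init(q);
    }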
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index a4e83e5d619b..5dbf8d53db09 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -97,13 +97,6 @@ struct vchiq_arm_state {
* tracked separately with the state.
*/
int peer_use_count;
-
- /*
- * Flag to indicate that the first vchiq connect has made it through.
- * This means that both sides should be fully ready, and we should
- * be able to suspend after this point.
- */
- int first_connect;
};
static int
@@ -271,7 +264,7 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
return -ENXIO;
}
- dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %pK, phys %pad)\n",
+ dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %p, phys %pad)\n",
vchiq_slot_zero, &slot_phys);
mutex_init(&drv_mgmt->connected_mutex);
@@ -280,32 +273,23 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
return 0;
}
-int
-vchiq_platform_init_state(struct vchiq_state *state)
+static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
- struct vchiq_arm_state *platform_state;
-
- platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
- if (!platform_state)
- return -ENOMEM;
-
- rwlock_init(&platform_state->susp_res_lock);
-
- init_completion(&platform_state->ka_evt);
- atomic_set(&platform_state->ka_use_count, 0);
- atomic_set(&platform_state->ka_use_ack_count, 0);
- atomic_set(&platform_state->ka_release_count, 0);
-
- platform_state->state = state;
-
- state->platform_state = (struct opaque_platform_state *)platform_state;
-
- return 0;
+ return (struct vchiq_arm_state *)state->platform_state;
}
-static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
+static void
+vchiq_platform_uninit(struct vchiq_drv_mgmt *mgmt)
{
- return (struct vchiq_arm_state *)state->platform_state;
+ struct vchiq_arm_state *arm_state;
+
+ kthread_stop(mgmt->state.sync_thread);
+ kthread_stop(mgmt->state.recycle_thread);
+ kthread_stop(mgmt->state.slot_handler_thread);
+
+ arm_state = vchiq_platform_get_arm_state(&mgmt->state);
+ if (!IS_ERR_OR_NULL(arm_state->ka_thread))
+ kthread_stop(arm_state->ka_thread);
}
void vchiq_dump_platform_state(struct seq_file *f)
@@ -368,7 +352,7 @@ void free_bulk_waiter(struct vchiq_instance *instance)
&instance->bulk_waiter_list, list) {
list_del(&waiter->list);
dev_dbg(instance->state->dev,
- "arm: bulk_waiter - cleaned up %pK for pid %d\n",
+ "arm: bulk_waiter - cleaned up %p for pid %d\n",
waiter, waiter->pid);
kfree(waiter);
}
@@ -622,7 +606,7 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
mutex_lock(&instance->bulk_waiter_list_mutex);
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
- dev_dbg(instance->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
+ dev_dbg(instance->state->dev, "arm: saved bulk_waiter %p for pid %d\n",
waiter, current->pid);
}
@@ -998,6 +982,39 @@ exit:
}
int
+vchiq_platform_init_state(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *platform_state;
+ char threadname[16];
+
+ platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
+ if (!platform_state)
+ return -ENOMEM;
+
+ snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+ state->id);
+ platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+ (void *)state, threadname);
+ if (IS_ERR(platform_state->ka_thread)) {
+ dev_err(state->dev, "couldn't create thread %s\n", threadname);
+ return PTR_ERR(platform_state->ka_thread);
+ }
+
+ rwlock_init(&platform_state->susp_res_lock);
+
+ init_completion(&platform_state->ka_evt);
+ atomic_set(&platform_state->ka_use_count, 0);
+ atomic_set(&platform_state->ka_use_ack_count, 0);
+ atomic_set(&platform_state->ka_release_count, 0);
+
+ platform_state->state = state;
+
+ state->platform_state = (struct opaque_platform_state *)platform_state;
+
+ return 0;
+}
+
+int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
{
@@ -1312,37 +1329,19 @@ out:
return ret;
}
+void vchiq_platform_connected(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+
+ wake_up_process(arm_state->ka_thread);
+}
+
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate oldstate,
enum vchiq_connstate newstate)
{
- struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- char threadname[16];
-
dev_dbg(state->dev, "suspend: %d: %s->%s\n",
state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
- if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
- return;
-
- write_lock_bh(&arm_state->susp_res_lock);
- if (arm_state->first_connect) {
- write_unlock_bh(&arm_state->susp_res_lock);
- return;
- }
-
- arm_state->first_connect = 1;
- write_unlock_bh(&arm_state->susp_res_lock);
- snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
- state->id);
- arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
- (void *)state,
- threadname);
- if (IS_ERR(arm_state->ka_thread)) {
- dev_err(state->dev, "suspend: Couldn't create thread %s\n",
- threadname);
- } else {
- wake_up_process(arm_state->ka_thread);
- }
}
static const struct of_device_id vchiq_of_match[] = {
@@ -1386,8 +1385,6 @@ static int vchiq_probe(struct platform_device *pdev)
return ret;
}
- vchiq_debugfs_init(&mgmt->state);
-
dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
VCHIQ_VERSION, VCHIQ_VERSION_MIN);
@@ -1398,9 +1395,12 @@ static int vchiq_probe(struct platform_device *pdev)
ret = vchiq_register_chrdev(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
+ vchiq_platform_uninit(mgmt);
return ret;
}
+ vchiq_debugfs_init(&mgmt->state);
+
bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");
@@ -1410,19 +1410,12 @@ static int vchiq_probe(struct platform_device *pdev)
static void vchiq_remove(struct platform_device *pdev)
{
struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev);
- struct vchiq_arm_state *arm_state;
vchiq_device_unregister(bcm2835_audio);
vchiq_device_unregister(bcm2835_camera);
vchiq_debugfs_deinit();
vchiq_deregister_chrdev();
-
- kthread_stop(mgmt->state.sync_thread);
- kthread_stop(mgmt->state.recycle_thread);
- kthread_stop(mgmt->state.slot_handler_thread);
-
- arm_state = vchiq_platform_get_arm_state(&mgmt->state);
- kthread_stop(arm_state->ka_thread);
+ vchiq_platform_uninit(mgmt);
}
static struct platform_driver vchiq_driver = {
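The vchiq changes above create the keepalive kthread up front in vchiq_platform_init_state() and only wake it from vchiq_platform_connected(); this relies on kthread_create() leaving the new thread asleep until wake_up_process() is called. A condensed sketch of that create-early/start-later pattern, using illustrative example_* names:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/delay.h>
    #include <linux/err.h>

    static struct task_struct *example_thread;

    static int example_thread_fn(void *data)
    {
        while (!kthread_should_stop())
            msleep(1000);   /* placeholder for real keepalive work */
        return 0;
    }

    static int example_init(void)
    {
        /* created now, but not scheduled until wake_up_process() */
        example_thread = kthread_create(example_thread_fn, NULL, "example-keep/0");
        return PTR_ERR_OR_ZERO(example_thread);
    }

    static void example_connected(void)
    {
        wake_up_process(example_thread);    /* start it only once connected */
    }

    static void example_exit(void)
    {
        if (!IS_ERR_OR_NULL(example_thread))
            kthread_stop(example_thread);
    }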
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 8d5795db4f39..e7b0c800a205 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -470,7 +470,7 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
cb_userdata = bulk->cb_userdata;
}
- dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %pK, %pK %pK)\n",
+ dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %p, %p %p)\n",
service->state->id, service->localport, reason_names[reason],
header, cb_data, cb_userdata);
status = service->base.callback(service->instance, reason, header, service->handle,
@@ -778,7 +778,7 @@ process_free_data_message(struct vchiq_state *state, u32 *service_found,
complete(&quota->quota_event);
} else if (count == 0) {
dev_err(state->dev,
- "core: service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
+ "core: service %d message_use_count=%d (header %p, msgid %x, header->msgid %x, header->size %x)\n",
port, quota->message_use_count, header, msgid,
header->msgid, header->size);
WARN(1, "invalid message use count\n");
@@ -799,11 +799,11 @@ process_free_data_message(struct vchiq_state *state, u32 *service_found,
* it has dropped below its quota
*/
complete(&quota->quota_event);
- dev_dbg(state->dev, "core: %d: pfq:%d %x@%pK - slot_use->%d\n",
+ dev_dbg(state->dev, "core: %d: pfq:%d %x@%p - slot_use->%d\n",
state->id, port, header->size, header, count - 1);
} else {
dev_err(state->dev,
- "core: service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
+ "core: service %d slot_use_count=%d (header %p, msgid %x, header->msgid %x, header->size %x)\n",
port, count, header, msgid, header->msgid, header->size);
WARN(1, "bad slot use count\n");
}
@@ -845,7 +845,7 @@ process_free_queue(struct vchiq_state *state, u32 *service_found,
*/
rmb();
- dev_dbg(state->dev, "core: %d: pfq %d=%pK %x %x\n",
+ dev_dbg(state->dev, "core: %d: pfq %d=%p %x %x\n",
state->id, slot_index, data, local->slot_queue_recycle,
slot_queue_available);
@@ -868,7 +868,7 @@ process_free_queue(struct vchiq_state *state, u32 *service_found,
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
dev_err(state->dev,
- "core: pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
+ "core: pfq - pos %x: header %p, msgid %x, header->msgid %x, header->size %x\n",
pos, header, msgid, header->msgid, header->size);
WARN(1, "invalid slot position\n");
}
@@ -1060,7 +1060,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
int tx_end_index;
int slot_use_count;
- dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
+ dev_dbg(state->dev, "core: %d: qm %s@%p,%zx (%d->%d)\n",
state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
@@ -1117,7 +1117,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
} else {
- dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
+ dev_dbg(state->dev, "core: %d: qm %s@%p,%zx (%d->%d)\n",
state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
if (size != 0) {
@@ -1204,7 +1204,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
state->id, oldmsgid);
}
- dev_dbg(state->dev, "sync: %d: qms %s@%pK,%x (%d->%d)\n",
+ dev_dbg(state->dev, "sync: %d: qms %s@%p,%x (%d->%d)\n",
state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
@@ -1539,7 +1539,7 @@ create_pagelist(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
GFP_KERNEL);
- dev_dbg(instance->state->dev, "arm: %pK\n", pagelist);
+ dev_dbg(instance->state->dev, "arm: %p\n", pagelist);
if (!pagelist)
return NULL;
@@ -1692,7 +1692,7 @@ free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagel
unsigned int num_pages = pagelistinfo->num_pages;
unsigned int cache_line_size;
- dev_dbg(instance->state->dev, "arm: %pK, %d\n", pagelistinfo->pagelist, actual);
+ dev_dbg(instance->state->dev, "arm: %p, %d\n", pagelistinfo->pagelist, actual);
drv_mgmt = dev_get_drvdata(instance->state->dev);
@@ -1849,7 +1849,7 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
payload = (struct vchiq_open_payload *)header->data;
fourcc = payload->fourcc;
- dev_dbg(state->dev, "core: %d: prs OPEN@%pK (%d->'%p4cc')\n",
+ dev_dbg(state->dev, "core: %d: prs OPEN@%p (%d->'%p4cc')\n",
state->id, header, localport, &fourcc);
service = get_listening_service(state, fourcc);
@@ -1976,14 +1976,14 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
service = get_connected_service(state, remoteport);
if (service)
dev_warn(state->dev,
- "core: %d: prs %s@%pK (%d->%d) - found connected service %d\n",
+ "core: %d: prs %s@%p (%d->%d) - found connected service %d\n",
state->id, msg_type_str(type), header,
remoteport, localport, service->localport);
}
if (!service) {
dev_err(state->dev,
- "core: %d: prs %s@%pK (%d->%d) - invalid/closed service %d\n",
+ "core: %d: prs %s@%p (%d->%d) - invalid/closed service %d\n",
state->id, msg_type_str(type), header, remoteport,
localport, localport);
goto skip_message;
@@ -2003,7 +2003,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
if (((unsigned long)header & VCHIQ_SLOT_MASK) +
calc_stride(size) > VCHIQ_SLOT_SIZE) {
- dev_err(state->dev, "core: header %pK (msgid %x) - size %x too big for slot\n",
+ dev_err(state->dev, "core: header %p (msgid %x) - size %x too big for slot\n",
header, (unsigned int)msgid, (unsigned int)size);
WARN(1, "oversized for slot\n");
}
@@ -2022,7 +2022,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
service->peer_version = payload->version;
}
dev_dbg(state->dev,
- "core: %d: prs OPENACK@%pK,%x (%d->%d) v:%d\n",
+ "core: %d: prs OPENACK@%p,%x (%d->%d) v:%d\n",
state->id, header, size, remoteport, localport,
service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
@@ -2037,7 +2037,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
case VCHIQ_MSG_CLOSE:
WARN_ON(size); /* There should be no data */
- dev_dbg(state->dev, "core: %d: prs CLOSE@%pK (%d->%d)\n",
+ dev_dbg(state->dev, "core: %d: prs CLOSE@%p (%d->%d)\n",
state->id, header, remoteport, localport);
mark_service_closing_internal(service, 1);
@@ -2049,7 +2049,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
&service->base.fourcc, service->localport, service->remoteport);
break;
case VCHIQ_MSG_DATA:
- dev_dbg(state->dev, "core: %d: prs DATA@%pK,%x (%d->%d)\n",
+ dev_dbg(state->dev, "core: %d: prs DATA@%p,%x (%d->%d)\n",
state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
@@ -2069,7 +2069,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
break;
case VCHIQ_MSG_CONNECT:
- dev_dbg(state->dev, "core: %d: prs CONNECT@%pK\n",
+ dev_dbg(state->dev, "core: %d: prs CONNECT@%p\n",
state->id, header);
state->version_common = ((struct vchiq_slot_zero *)
state->slot_data)->version;
@@ -2102,7 +2102,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
if ((int)(queue->remote_insert -
queue->local_insert) >= 0) {
dev_err(state->dev,
- "core: %d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)\n",
+ "core: %d: prs %s@%p (%d->%d) unexpected (ri=%d,li=%d)\n",
state->id, msg_type_str(type), header, remoteport,
localport, queue->remote_insert, queue->local_insert);
mutex_unlock(&service->bulk_mutex);
@@ -2120,7 +2120,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
bulk->actual = *(int *)header->data;
queue->remote_insert++;
- dev_dbg(state->dev, "core: %d: prs %s@%pK (%d->%d) %x@%pad\n",
+ dev_dbg(state->dev, "core: %d: prs %s@%p (%d->%d) %x@%pad\n",
state->id, msg_type_str(type), header, remoteport,
localport, bulk->actual, &bulk->dma_addr);
@@ -2140,12 +2140,12 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
break;
case VCHIQ_MSG_PADDING:
- dev_dbg(state->dev, "core: %d: prs PADDING@%pK,%x\n",
+ dev_dbg(state->dev, "core: %d: prs PADDING@%p,%x\n",
state->id, header, size);
break;
case VCHIQ_MSG_PAUSE:
/* If initiated, signal the application thread */
- dev_dbg(state->dev, "core: %d: prs PAUSE@%pK,%x\n",
+ dev_dbg(state->dev, "core: %d: prs PAUSE@%p,%x\n",
state->id, header, size);
if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
dev_err(state->dev, "core: %d: PAUSE received in state PAUSED\n",
@@ -2162,7 +2162,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
break;
case VCHIQ_MSG_RESUME:
- dev_dbg(state->dev, "core: %d: prs RESUME@%pK,%x\n",
+ dev_dbg(state->dev, "core: %d: prs RESUME@%p,%x\n",
state->id, header, size);
/* Release the slot mutex */
mutex_unlock(&state->slot_mutex);
@@ -2179,7 +2179,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
break;
default:
- dev_err(state->dev, "core: %d: prs invalid msgid %x@%pK,%x\n",
+ dev_err(state->dev, "core: %d: prs invalid msgid %x@%p,%x\n",
state->id, msgid, header, size);
WARN(1, "invalid message\n");
break;
@@ -2400,7 +2400,7 @@ sync_func(void *v)
if (!service) {
dev_err(state->dev,
- "sync: %d: sf %s@%pK (%d->%d) - invalid/closed service %d\n",
+ "sync: %d: sf %s@%p (%d->%d) - invalid/closed service %d\n",
state->id, msg_type_str(type), header, remoteport,
localport, localport);
release_message_sync(state, header);
@@ -2422,7 +2422,7 @@ sync_func(void *v)
header->data;
service->peer_version = payload->version;
}
- dev_err(state->dev, "sync: %d: sf OPENACK@%pK,%x (%d->%d) v:%d\n",
+ dev_err(state->dev, "sync: %d: sf OPENACK@%p,%x (%d->%d) v:%d\n",
state->id, header, size, remoteport, localport,
service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
@@ -2435,7 +2435,7 @@ sync_func(void *v)
break;
case VCHIQ_MSG_DATA:
- dev_dbg(state->dev, "sync: %d: sf DATA@%pK,%x (%d->%d)\n",
+ dev_dbg(state->dev, "sync: %d: sf DATA@%p,%x (%d->%d)\n",
state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
@@ -2449,7 +2449,7 @@ sync_func(void *v)
break;
default:
- dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%pK,%x\n",
+ dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%p,%x\n",
state->id, msgid, header, size);
release_message_sync(state, header);
break;
@@ -2926,13 +2926,13 @@ release_service_messages(struct vchiq_service *service)
int port = VCHIQ_MSG_DSTPORT(msgid);
if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
- dev_dbg(state->dev, "core: fsi - hdr %pK\n", header);
+ dev_dbg(state->dev, "core: fsi - hdr %p\n", header);
release_slot(state, slot_info, header, NULL);
}
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
dev_err(state->dev,
- "core: fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
+ "core: fsi - pos %x: header %p, msgid %x, header->msgid %x, header->size %x\n",
pos, header, msgid, header->msgid, header->size);
WARN(1, "invalid slot position\n");
}
@@ -3091,7 +3091,7 @@ vchiq_bulk_xfer_queue_msg_killable(struct vchiq_service *service,
*/
wmb();
- dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
+ dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %p\n",
state->id, service->localport, service->remoteport,
dir_char, bulk->size, &bulk->dma_addr, bulk->cb_data);
@@ -3343,6 +3343,7 @@ vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instanc
return -EAGAIN;
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+ vchiq_platform_connected(state);
complete(&state->connect);
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 9b4e766990a4..3b5c0618e567 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -575,6 +575,8 @@ int vchiq_send_remote_use(struct vchiq_state *state);
int vchiq_send_remote_use_active(struct vchiq_state *state);
+void vchiq_platform_connected(struct vchiq_state *state);
+
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate oldstate,
enum vchiq_connstate newstate);
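The new vchiq_platform_connected() hook declared here is called once vchiq_connect_internal() has moved the state to VCHIQ_CONNSTATE_CONNECTED (see the vchiq_core.c hunk above). A platform with nothing to do at that point could satisfy the declaration with a stub along these lines (an illustrative sketch, not the upstream implementation):

    void vchiq_platform_connected(struct vchiq_state *state)
    {
        /* Nothing to do on platforms without connect-time setup. */
    }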
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 454f43416503..3b20ba5c7362 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -270,7 +270,7 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
}
} else {
dev_err(service->state->dev,
- "arm: header %pK: bufsize %x < size %x\n",
+ "arm: header %p: bufsize %x < size %x\n",
header, args->bufsize, header->size);
WARN(1, "invalid size\n");
ret = -EMSGSIZE;
@@ -328,7 +328,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
ret = -ESRCH;
goto out;
}
- dev_dbg(service->state->dev, "arm: found bulk_waiter %pK for pid %d\n",
+ dev_dbg(service->state->dev, "arm: found bulk_waiter %p for pid %d\n",
waiter, current->pid);
status = vchiq_bulk_xfer_waiting(instance, args->handle,
@@ -366,7 +366,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
mutex_lock(&instance->bulk_waiter_list_mutex);
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
- dev_dbg(service->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
+ dev_dbg(service->state->dev, "arm: saved bulk_waiter %p for pid %d\n",
waiter, current->pid);
ret = put_user(mode_waiting, mode);
@@ -512,7 +512,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
/* This must be a VCHIQ-style service */
if (args->msgbufsize < msglen) {
dev_err(service->state->dev,
- "arm: header %pK: msgbufsize %x < msglen %x\n",
+ "arm: header %p: msgbufsize %x < msglen %x\n",
header, args->msgbufsize, msglen);
WARN(1, "invalid message size\n");
if (ret == 0)
@@ -588,7 +588,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
long ret = 0;
int i, rc;
- dev_dbg(instance->state->dev, "arm: instance %pK, cmd %s, arg %lx\n", instance,
+ dev_dbg(instance->state->dev, "arm: instance %p, cmd %s, arg %lx\n", instance,
((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
@@ -874,12 +874,12 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK)) {
dev_dbg(instance->state->dev,
- "arm: ioctl instance %pK, cmd %s -> status %d, %ld\n",
+ "arm: ioctl instance %p, cmd %s -> status %d, %ld\n",
instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
} else {
dev_dbg(instance->state->dev,
- "arm: ioctl instance %pK, cmd %s -> status %d\n, %ld\n",
+ "arm: ioctl instance %p, cmd %s -> status %d\n, %ld\n",
instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
}
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 1f25529fe05d..361fece3d818 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -93,9 +93,11 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
if (ret)
goto err_nvm;
- ret = tb_nvm_add_non_active(nvm, nvm_write);
- if (ret)
- goto err_nvm;
+ if (!rt->no_nvm_upgrade) {
+ ret = tb_nvm_add_non_active(nvm, nvm_write);
+ if (ret)
+ goto err_nvm;
+ }
rt->nvm = nvm;
dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 390abcfe7188..8c527af98927 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1305,12 +1305,16 @@ static void tb_scan_port(struct tb_port *port)
goto out_rpm_put;
}
- tb_retimer_scan(port, true);
-
sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
tb_downstream_route(port));
if (IS_ERR(sw)) {
/*
+ * Make the downstream retimers available even if there
+ * is no router connected.
+ */
+ tb_retimer_scan(port, true);
+
+ /*
* If there is an error accessing the connected switch
* it may be connected to another domain. Also we allow
* the other domain to be connected to a max depth switch.
@@ -1360,6 +1364,14 @@ static void tb_scan_port(struct tb_port *port)
tb_configure_link(port, upstream_port, sw);
/*
+ * Scan for downstream retimers. We only scan them after the
+ * router has been enumerated to avoid issues with certain
+	 * pluggable devices that expect the host to enumerate them
+	 * within a certain timeout.
+ */
+ tb_retimer_scan(port, true);
+
+ /*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
*/
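Taken together, the two tb.c hunks above move tb_retimer_scan() from before tb_switch_alloc() to two later points: the error path when no router answers, and after the router has been enumerated and its link configured. A condensed sketch of the resulting flow, with unrelated steps elided (not the literal upstream function):

    static void tb_scan_port(struct tb_port *port)
    {
        struct tb_switch *sw;

        sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
                             tb_downstream_route(port));
        if (IS_ERR(sw)) {
            /* No usable router behind the port: expose retimers anyway. */
            tb_retimer_scan(port, true);
            /* ...XDomain handling elided... */
            return;
        }

        /* ...router added, enumerated and its upstream link configured... */

        /*
         * Only now scan for retimers, so pluggable devices that expect
         * the host to enumerate the router promptly are not delayed.
         */
        tb_retimer_scan(port, true);

        /* ...CLx setup and the scan of downstream ports follow... */
    }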
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 717b31d78728..76254ed3f47f 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -2229,19 +2229,15 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
"USB3 Down");
- if (!path) {
- tb_tunnel_put(tunnel);
- return NULL;
- }
+ if (!path)
+ goto err_free;
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_DOWN] = path;
path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
"USB3 Up");
- if (!path) {
- tb_tunnel_put(tunnel);
- return NULL;
- }
+ if (!path)
+ goto err_free;
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_UP] = path;
@@ -2258,6 +2254,10 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
}
return tunnel;
+
+err_free:
+ tb_tunnel_put(tunnel);
+ return NULL;
}
/**
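The tb_tunnel_alloc_usb3() change is a plain consolidation of duplicated cleanup ("put the tunnel, return NULL") behind a single err_free label. The same pattern in miniature, as standalone C (names here are illustrative only):

    #include <stdlib.h>

    struct tunnel {
        void *down_path;
        void *up_path;
    };

    static struct tunnel *tunnel_alloc(size_t path_size)
    {
        struct tunnel *t = calloc(1, sizeof(*t));

        if (!t)
            return NULL;

        t->down_path = malloc(path_size);
        if (!t->down_path)
            goto err_free;          /* single cleanup point */

        t->up_path = malloc(path_size);
        if (!t->up_path)
            goto err_free;

        return t;

    err_free:
        free(t->down_path);         /* free(NULL) is a no-op */
        free(t);
        return NULL;
    }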
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 7fb81bbaee60..149f3d53b760 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -210,7 +210,7 @@ config SERIAL_NONSTANDARD
config MOXA_INTELLIO
tristate "Moxa Intellio support"
- depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
+ depends on SERIAL_NONSTANDARD && PCI
select FW_LOADER
help
Say Y here if you have a Moxa Intellio multiport serial card.
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index ebaada8db929..1348e2214b81 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -43,15 +43,6 @@
#include <linux/ratelimit.h>
#include <asm/io.h>
-#include <linux/uaccess.h>
-
-#define MOXA 0x400
-#define MOXA_GET_IQUEUE (MOXA + 1) /* get input buffered count */
-#define MOXA_GET_OQUEUE (MOXA + 2) /* get output buffered count */
-#define MOXA_GETDATACOUNT (MOXA + 23)
-#define MOXA_GET_IOQUEUE (MOXA + 27)
-#define MOXA_FLUSH_QUEUE (MOXA + 28)
-#define MOXA_GETMSTATUS (MOXA + 65)
/*
* System Configuration
@@ -347,8 +338,6 @@
#define MX_PARMARK 0xA0
#define MX_PARSPACE 0x20
-#define MOXA_VERSION "6.0k"
-
#define MOXA_FW_HDRLEN 32
#define MOXAMAJOR 172
@@ -357,33 +346,21 @@
#define MAX_PORTS_PER_BOARD 32 /* Don't change this value */
#define MAX_PORTS (MAX_BOARDS * MAX_PORTS_PER_BOARD)
-#define MOXA_IS_320(brd) ((brd)->boardType == MOXA_BOARD_C320_ISA || \
- (brd)->boardType == MOXA_BOARD_C320_PCI)
-
-/*
- * Define the Moxa PCI vendor and device IDs.
- */
-#define MOXA_BUS_TYPE_ISA 0
-#define MOXA_BUS_TYPE_PCI 1
+#define MOXA_IS_320(brd) ((brd)->boardType == MOXA_BOARD_C320_PCI)
enum {
MOXA_BOARD_C218_PCI = 1,
- MOXA_BOARD_C218_ISA,
MOXA_BOARD_C320_PCI,
- MOXA_BOARD_C320_ISA,
MOXA_BOARD_CP204J,
};
static char *moxa_brdname[] =
{
"C218 Turbo PCI series",
- "C218 Turbo ISA series",
"C320 Turbo PCI series",
- "C320 Turbo ISA series",
"CP-204J series",
};
-#ifdef CONFIG_PCI
static const struct pci_device_id moxa_pcibrds[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C218),
.driver_data = MOXA_BOARD_C218_PCI },
@@ -394,14 +371,12 @@ static const struct pci_device_id moxa_pcibrds[] = {
{ 0 }
};
MODULE_DEVICE_TABLE(pci, moxa_pcibrds);
-#endif /* CONFIG_PCI */
struct moxa_port;
static struct moxa_board_conf {
int boardType;
int numPorts;
- int busType;
unsigned int ready;
@@ -413,19 +388,6 @@ static struct moxa_board_conf {
void __iomem *intTable;
} moxa_boards[MAX_BOARDS];
-struct mxser_mstatus {
- tcflag_t cflag;
- int cts;
- int dsr;
- int ri;
- int dcd;
-};
-
-struct moxaq_str {
- int inq;
- int outq;
-};
-
struct moxa_port {
struct tty_port port;
struct moxa_board_conf *board;
@@ -440,12 +402,6 @@ struct moxa_port {
u8 lowChkFlag;
};
-struct mon_str {
- int tick;
- int rxcnt[MAX_PORTS];
- int txcnt[MAX_PORTS];
-};
-
/* statusflags */
#define TXSTOPPED 1
#define LOWWAIT 2
@@ -455,17 +411,11 @@ struct mon_str {
#define WAKEUP_CHARS 256
static int ttymajor = MOXAMAJOR;
-static struct mon_str moxaLog;
static unsigned int moxaFuncTout = HZ / 2;
static unsigned int moxaLowWaterChk;
static DEFINE_MUTEX(moxa_openlock);
static DEFINE_SPINLOCK(moxa_lock);
-static unsigned long baseaddr[MAX_BOARDS];
-static unsigned int type[MAX_BOARDS];
-static unsigned int numports[MAX_BOARDS];
-static struct tty_port moxa_service_port;
-
MODULE_AUTHOR("William Chen");
MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
MODULE_LICENSE("GPL");
@@ -473,13 +423,6 @@ MODULE_FIRMWARE("c218tunx.cod");
MODULE_FIRMWARE("cp204unx.cod");
MODULE_FIRMWARE("c320tunx.cod");
-module_param_array(type, uint, NULL, 0);
-MODULE_PARM_DESC(type, "card type: C218=2, C320=4");
-module_param_hw_array(baseaddr, ulong, ioport, NULL, 0);
-MODULE_PARM_DESC(baseaddr, "base address");
-module_param_array(numports, uint, NULL, 0);
-MODULE_PARM_DESC(numports, "numports (ignored for C218)");
-
module_param(ttymajor, int, 0);
/*
@@ -583,104 +526,6 @@ static void moxa_low_water_check(void __iomem *ofsAddr)
* TTY operations
*/
-static int moxa_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct moxa_port *ch = tty->driver_data;
- void __user *argp = (void __user *)arg;
- int status, ret = 0;
-
- if (tty->index == MAX_PORTS) {
- if (cmd != MOXA_GETDATACOUNT && cmd != MOXA_GET_IOQUEUE &&
- cmd != MOXA_GETMSTATUS)
- return -EINVAL;
- } else if (!ch)
- return -ENODEV;
-
- switch (cmd) {
- case MOXA_GETDATACOUNT:
- moxaLog.tick = jiffies;
- if (copy_to_user(argp, &moxaLog, sizeof(moxaLog)))
- ret = -EFAULT;
- break;
- case MOXA_FLUSH_QUEUE:
- MoxaPortFlushData(ch, arg);
- break;
- case MOXA_GET_IOQUEUE: {
- struct moxaq_str __user *argm = argp;
- struct moxaq_str tmp;
- struct moxa_port *p;
- unsigned int i, j;
-
- for (i = 0; i < MAX_BOARDS; i++) {
- p = moxa_boards[i].ports;
- for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
- memset(&tmp, 0, sizeof(tmp));
- spin_lock_bh(&moxa_lock);
- if (moxa_boards[i].ready) {
- tmp.inq = MoxaPortRxQueue(p);
- tmp.outq = MoxaPortTxQueue(p);
- }
- spin_unlock_bh(&moxa_lock);
- if (copy_to_user(argm, &tmp, sizeof(tmp)))
- return -EFAULT;
- }
- }
- break;
- } case MOXA_GET_OQUEUE:
- status = MoxaPortTxQueue(ch);
- ret = put_user(status, (unsigned long __user *)argp);
- break;
- case MOXA_GET_IQUEUE:
- status = MoxaPortRxQueue(ch);
- ret = put_user(status, (unsigned long __user *)argp);
- break;
- case MOXA_GETMSTATUS: {
- struct mxser_mstatus __user *argm = argp;
- struct mxser_mstatus tmp;
- struct moxa_port *p;
- unsigned int i, j;
-
- for (i = 0; i < MAX_BOARDS; i++) {
- p = moxa_boards[i].ports;
- for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
- struct tty_struct *ttyp;
- memset(&tmp, 0, sizeof(tmp));
- spin_lock_bh(&moxa_lock);
- if (!moxa_boards[i].ready) {
- spin_unlock_bh(&moxa_lock);
- goto copy;
- }
-
- status = MoxaPortLineStatus(p);
- spin_unlock_bh(&moxa_lock);
-
- if (status & 1)
- tmp.cts = 1;
- if (status & 2)
- tmp.dsr = 1;
- if (status & 4)
- tmp.dcd = 1;
-
- ttyp = tty_port_tty_get(&p->port);
- if (!ttyp)
- tmp.cflag = p->cflag;
- else
- tmp.cflag = ttyp->termios.c_cflag;
- tty_kref_put(ttyp);
-copy:
- if (copy_to_user(argm, &tmp, sizeof(tmp)))
- return -EFAULT;
- }
- }
- break;
- }
- default:
- ret = -ENOIOCTLCMD;
- }
- return ret;
-}
-
static int moxa_break_ctl(struct tty_struct *tty, int state)
{
struct moxa_port *port = tty->driver_data;
@@ -697,7 +542,6 @@ static const struct tty_operations moxa_ops = {
.write_room = moxa_write_room,
.flush_buffer = moxa_flush_buffer,
.chars_in_buffer = moxa_chars_in_buffer,
- .ioctl = moxa_ioctl,
.set_termios = moxa_set_termios,
.stop = moxa_stop,
.start = moxa_start,
@@ -725,7 +569,6 @@ static DEFINE_TIMER(moxaTimer, moxa_poll);
static int moxa_check_fw_model(struct moxa_board_conf *brd, u8 model)
{
switch (brd->boardType) {
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
if (model != 1)
goto err;
@@ -769,7 +612,6 @@ static int moxa_load_bios(struct moxa_board_conf *brd, const u8 *buf,
msleep(2000);
switch (brd->boardType) {
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
tmp = readw(baseAddr + C218_key);
if (tmp != C218_KeyCode)
@@ -833,7 +675,6 @@ static int moxa_real_load_code(struct moxa_board_conf *brd, const void *ptr,
switch (brd->boardType) {
case MOXA_BOARD_CP204J:
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
key = C218_key;
loadbuf = C218_LoadBuf;
@@ -898,15 +739,9 @@ static int moxa_real_load_code(struct moxa_board_conf *brd, const void *ptr,
return -EIO;
if (MOXA_IS_320(brd)) {
- if (brd->busType == MOXA_BUS_TYPE_PCI) { /* ASIC board */
- writew(0x3800, baseAddr + TMS320_PORT1);
- writew(0x3900, baseAddr + TMS320_PORT2);
- writew(28499, baseAddr + TMS320_CLOCK);
- } else {
- writew(0x3200, baseAddr + TMS320_PORT1);
- writew(0x3400, baseAddr + TMS320_PORT2);
- writew(19999, baseAddr + TMS320_CLOCK);
- }
+ writew(0x3800, baseAddr + TMS320_PORT1);
+ writew(0x3900, baseAddr + TMS320_PORT2);
+ writew(28499, baseAddr + TMS320_CLOCK);
}
writew(1, baseAddr + Disable_IRQ);
writew(0, baseAddr + Magic_no);
@@ -957,7 +792,6 @@ static int moxa_load_code(struct moxa_board_conf *brd, const void *ptr,
return retval;
switch (brd->boardType) {
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
case MOXA_BOARD_CP204J:
port = brd->ports;
@@ -1141,7 +975,6 @@ static int moxa_init_board(struct moxa_board_conf *brd, struct device *dev)
}
switch (brd->boardType) {
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
file = "c218tunx.cod";
break;
@@ -1227,7 +1060,6 @@ static void moxa_board_deinit(struct moxa_board_conf *brd)
kfree(brd->ports);
}
-#ifdef CONFIG_PCI
static int moxa_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1270,7 +1102,6 @@ static int moxa_pci_probe(struct pci_dev *pdev,
board->boardType = board_type;
switch (board_type) {
- case MOXA_BOARD_C218_ISA:
case MOXA_BOARD_C218_PCI:
board->numPorts = 8;
break;
@@ -1282,7 +1113,6 @@ static int moxa_pci_probe(struct pci_dev *pdev,
board->numPorts = 0;
break;
}
- board->busType = MOXA_BUS_TYPE_PCI;
retval = moxa_init_board(board, &pdev->dev);
if (retval)
@@ -1318,21 +1148,12 @@ static struct pci_driver moxa_pci_driver = {
.probe = moxa_pci_probe,
.remove = moxa_pci_remove
};
-#endif /* CONFIG_PCI */
static int __init moxa_init(void)
{
- unsigned int isabrds = 0;
int retval = 0;
- struct moxa_board_conf *brd = moxa_boards;
- unsigned int i;
-
- printk(KERN_INFO "MOXA Intellio family driver version %s\n",
- MOXA_VERSION);
- tty_port_init(&moxa_service_port);
-
- moxaDriver = tty_alloc_driver(MAX_PORTS + 1,
+ moxaDriver = tty_alloc_driver(MAX_PORTS,
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(moxaDriver))
@@ -1348,8 +1169,6 @@ static int __init moxa_init(void)
moxaDriver->init_termios.c_ispeed = 9600;
moxaDriver->init_termios.c_ospeed = 9600;
tty_set_operations(moxaDriver, &moxa_ops);
- /* Having one more port only for ioctls is ugly */
- tty_port_link_device(&moxa_service_port, moxaDriver, MAX_PORTS);
if (tty_register_driver(moxaDriver)) {
printk(KERN_ERR "can't register MOXA Smartio tty driver!\n");
@@ -1357,64 +1176,16 @@ static int __init moxa_init(void)
return -1;
}
- /* Find the boards defined from module args. */
-
- for (i = 0; i < MAX_BOARDS; i++) {
- if (!baseaddr[i])
- break;
- if (type[i] == MOXA_BOARD_C218_ISA ||
- type[i] == MOXA_BOARD_C320_ISA) {
- pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n",
- isabrds + 1, moxa_brdname[type[i] - 1],
- baseaddr[i]);
- brd->boardType = type[i];
- brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 :
- numports[i];
- brd->busType = MOXA_BUS_TYPE_ISA;
- brd->basemem = ioremap(baseaddr[i], 0x4000);
- if (!brd->basemem) {
- printk(KERN_ERR "MOXA: can't remap %lx\n",
- baseaddr[i]);
- continue;
- }
- if (moxa_init_board(brd, NULL)) {
- iounmap(brd->basemem);
- brd->basemem = NULL;
- continue;
- }
-
- printk(KERN_INFO "MOXA isa board found at 0x%.8lx and "
- "ready (%u ports, firmware loaded)\n",
- baseaddr[i], brd->numPorts);
-
- brd++;
- isabrds++;
- }
- }
-
-#ifdef CONFIG_PCI
retval = pci_register_driver(&moxa_pci_driver);
- if (retval) {
+ if (retval)
printk(KERN_ERR "Can't register MOXA pci driver!\n");
- if (isabrds)
- retval = 0;
- }
-#endif
return retval;
}
static void __exit moxa_exit(void)
{
- unsigned int i;
-
-#ifdef CONFIG_PCI
pci_unregister_driver(&moxa_pci_driver);
-#endif
-
- for (i = 0; i < MAX_BOARDS; i++) /* ISA boards */
- if (moxa_boards[i].ready)
- moxa_board_deinit(&moxa_boards[i]);
del_timer_sync(&moxaTimer);
@@ -1457,9 +1228,6 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
int port;
port = tty->index;
- if (port == MAX_PORTS) {
- return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
- }
if (mutex_lock_interruptible(&moxa_openlock))
return -ERESTARTSYS;
brd = &moxa_boards[port / MAX_PORTS_PER_BOARD];
@@ -2182,7 +1950,6 @@ static ssize_t MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer,
c = (head > tail) ? (head - tail - 1) : (head - tail + tx_mask);
if (c > len)
c = len;
- moxaLog.txcnt[port->port.tty->index] += c;
total = c;
if (spage == epage) {
bufhead = readw(ofsAddr + Ofs_txb);
@@ -2224,7 +1991,6 @@ static ssize_t MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer,
static int MoxaPortReadData(struct moxa_port *port)
{
- struct tty_struct *tty = port->port.tty;
void __iomem *baseAddr, *ofsAddr, *ofs;
u8 *dst;
unsigned int count, len, total;
@@ -2243,7 +2009,6 @@ static int MoxaPortReadData(struct moxa_port *port)
return 0;
total = count;
- moxaLog.rxcnt[tty->index] += total;
if (spage == epage) {
bufhead = readw(ofsAddr + Ofs_rxb);
writew(spage, baseAddr + Control_reg);
@@ -2331,8 +2096,6 @@ static int moxa_get_serial_info(struct tty_struct *tty,
{
struct moxa_port *info = tty->driver_data;
- if (tty->index == MAX_PORTS)
- return -EINVAL;
if (!info)
return -ENODEV;
mutex_lock(&info->port.mutex);
@@ -2352,8 +2115,6 @@ static int moxa_set_serial_info(struct tty_struct *tty,
struct moxa_port *info = tty->driver_data;
unsigned int close_delay;
- if (tty->index == MAX_PORTS)
- return -EINVAL;
if (!info)
return -ENODEV;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 5e9ca4376d68..6af3f3a0b531 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -56,6 +56,8 @@
*/
#define WAKEUP_CHARS 256
+#define N_TTY_BUF_SIZE 4096
+
/*
* This defines the low- and high-watermarks for throttling and
* unthrottling the TTY driver. These watermarks are used for
@@ -79,14 +81,6 @@
#define ECHO_BLOCK 256
#define ECHO_DISCARD_WATERMARK N_TTY_BUF_SIZE - (ECHO_BLOCK + 32)
-
-#undef N_TTY_TRACE
-#ifdef N_TTY_TRACE
-# define n_tty_trace(f, args...) trace_printk(f, ##args)
-#else
-# define n_tty_trace(f, args...) no_printk(f, ##args)
-#endif
-
struct n_tty_data {
/* producer-published */
size_t read_head;
@@ -486,18 +480,13 @@ static int do_output_char(u8 c, struct tty_struct *tty, int space)
static int process_output(u8 c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
- int space, retval;
- mutex_lock(&ldata->output_lock);
-
- space = tty_write_room(tty);
- retval = do_output_char(c, tty, space);
+ guard(mutex)(&ldata->output_lock);
- mutex_unlock(&ldata->output_lock);
- if (retval < 0)
+ if (do_output_char(c, tty, tty_write_room(tty)) < 0)
return -1;
- else
- return 0;
+
+ return 0;
}
/**
@@ -522,17 +511,15 @@ static ssize_t process_output_block(struct tty_struct *tty,
const u8 *buf, unsigned int nr)
{
struct n_tty_data *ldata = tty->disc_data;
- int space;
- int i;
+ unsigned int space, i;
const u8 *cp;
- mutex_lock(&ldata->output_lock);
+ guard(mutex)(&ldata->output_lock);
space = tty_write_room(tty);
- if (space <= 0) {
- mutex_unlock(&ldata->output_lock);
- return space;
- }
+ if (space == 0)
+ return 0;
+
if (nr > space)
nr = space;
@@ -544,18 +531,18 @@ static ssize_t process_output_block(struct tty_struct *tty,
if (O_ONLRET(tty))
ldata->column = 0;
if (O_ONLCR(tty))
- goto break_out;
+ goto do_write;
ldata->canon_column = ldata->column;
break;
case '\r':
if (O_ONOCR(tty) && ldata->column == 0)
- goto break_out;
+ goto do_write;
if (O_OCRNL(tty))
- goto break_out;
+ goto do_write;
ldata->canon_column = ldata->column = 0;
break;
case '\t':
- goto break_out;
+ goto do_write;
case '\b':
if (ldata->column > 0)
ldata->column--;
@@ -563,18 +550,15 @@ static ssize_t process_output_block(struct tty_struct *tty,
default:
if (!iscntrl(c)) {
if (O_OLCUC(tty))
- goto break_out;
+ goto do_write;
if (!is_continuation(c, tty))
ldata->column++;
}
break;
}
}
-break_out:
- i = tty->ops->write(tty, buf, i);
-
- mutex_unlock(&ldata->output_lock);
- return i;
+do_write:
+ return tty->ops->write(tty, buf, i);
}
static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail,
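process_output() and process_output_block() above drop their explicit mutex_lock()/mutex_unlock() pairs in favour of guard(mutex)() from <linux/cleanup.h>: the lock is released automatically when the enclosing scope is left, which is what lets the early return paths lose their unlock calls. The guard is built on the compiler's variable cleanup attribute; a minimal userspace model of the same idea (illustrative only, not the kernel macro) follows:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void unlock_cleanup(pthread_mutex_t **m)
    {
        pthread_mutex_unlock(*m);
    }

    /* Rough stand-in for guard(mutex)(m): unlock runs when the scope is left. */
    #define mutex_scope_guard(m) \
        pthread_mutex_t *guard_var __attribute__((cleanup(unlock_cleanup))) = \
            (pthread_mutex_lock(m), (m))

    static int do_work(int value)
    {
        mutex_scope_guard(&lock);

        if (value < 0)
            return -1;      /* unlocked automatically on this path */

        printf("value %d\n", value);
        return 0;           /* ...and on this one */
    }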
@@ -696,7 +680,7 @@ static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail,
static size_t __process_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
- int space, old_space;
+ unsigned int space, old_space;
size_t tail;
u8 c;
@@ -2034,9 +2018,6 @@ static bool canon_copy_from_read_buf(const struct tty_struct *tty, u8 **kbp,
tail = MASK(ldata->read_tail);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
- n_tty_trace("%s: nr:%zu tail:%zu n:%zu size:%zu\n",
- __func__, *nr, tail, n, size);
-
eol = find_next_bit(ldata->read_flags, size, tail);
more = n - (size - tail);
if (eol == N_TTY_BUF_SIZE && more) {
@@ -2054,9 +2035,6 @@ static bool canon_copy_from_read_buf(const struct tty_struct *tty, u8 **kbp,
if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
n = c;
- n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
- __func__, eol, found, n, c, tail, more);
-
tty_copy(tty, *kbp, tail, n);
*kbp += n;
*nr -= n;
@@ -2133,6 +2111,66 @@ static int job_control(struct tty_struct *tty, struct file *file)
return __tty_check_change(tty, SIGTTIN);
}
+/*
+ * We still hold the atomic_read_lock and the termios_rwsem, and can just
+ * continue to copy data.
+ */
+static ssize_t n_tty_continue_cookie(struct tty_struct *tty, u8 *kbuf,
+ size_t nr, void **cookie)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ u8 *kb = kbuf;
+
+ if (ldata->icanon && !L_EXTPROC(tty)) {
+ /*
+ * If we have filled the user buffer, see if we should skip an
+ * EOF character before releasing the lock and returning done.
+ */
+ if (!nr)
+ canon_skip_eof(ldata);
+ else if (canon_copy_from_read_buf(tty, &kb, &nr))
+ return kb - kbuf;
+ } else {
+ if (copy_from_read_buf(tty, &kb, &nr))
+ return kb - kbuf;
+ }
+
+ /* No more data - release locks and stop retries */
+ n_tty_kick_worker(tty);
+ n_tty_check_unthrottle(tty);
+ up_read(&tty->termios_rwsem);
+ mutex_unlock(&ldata->atomic_read_lock);
+ *cookie = NULL;
+
+ return kb - kbuf;
+}
+
+static int n_tty_wait_for_input(struct tty_struct *tty, struct file *file,
+ struct wait_queue_entry *wait, long *timeout)
+{
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ return -EIO;
+ if (tty_hung_up_p(file))
+ return 0;
+ /*
+ * Abort readers for ttys which never actually get hung up.
+ * See __tty_hangup().
+ */
+ if (test_bit(TTY_HUPPING, &tty->flags))
+ return 0;
+ if (!*timeout)
+ return 0;
+ if (tty_io_nonblock(tty, file))
+ return -EAGAIN;
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ up_read(&tty->termios_rwsem);
+ *timeout = wait_woken(wait, TASK_INTERRUPTIBLE, *timeout);
+ down_read(&tty->termios_rwsem);
+
+ return 1;
+}
/**
* n_tty_read - read function for tty
@@ -2166,36 +2204,9 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf,
bool packet;
size_t old_tail;
- /*
- * Is this a continuation of a read started earler?
- *
- * If so, we still hold the atomic_read_lock and the
- * termios_rwsem, and can just continue to copy data.
- */
- if (*cookie) {
- if (ldata->icanon && !L_EXTPROC(tty)) {
- /*
- * If we have filled the user buffer, see
- * if we should skip an EOF character before
- * releasing the lock and returning done.
- */
- if (!nr)
- canon_skip_eof(ldata);
- else if (canon_copy_from_read_buf(tty, &kb, &nr))
- return kb - kbuf;
- } else {
- if (copy_from_read_buf(tty, &kb, &nr))
- return kb - kbuf;
- }
-
- /* No more data - release locks and stop retries */
- n_tty_kick_worker(tty);
- n_tty_check_unthrottle(tty);
- up_read(&tty->termios_rwsem);
- mutex_unlock(&ldata->atomic_read_lock);
- *cookie = NULL;
- return kb - kbuf;
- }
+ /* Is this a continuation of a read started earlier? */
+ if (*cookie)
+ return n_tty_continue_cookie(tty, kbuf, nr, cookie);
retval = job_control(tty, file);
if (retval < 0)
@@ -2250,34 +2261,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf,
tty_buffer_flush_work(tty->port);
down_read(&tty->termios_rwsem);
if (!input_available_p(tty, 0)) {
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
- retval = -EIO;
+ int ret = n_tty_wait_for_input(tty, file, &wait,
+ &timeout);
+ if (ret <= 0) {
+ retval = ret;
break;
}
- if (tty_hung_up_p(file))
- break;
- /*
- * Abort readers for ttys which never actually
- * get hung up. See __tty_hangup().
- */
- if (test_bit(TTY_HUPPING, &tty->flags))
- break;
- if (!timeout)
- break;
- if (tty_io_nonblock(tty, file)) {
- retval = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- up_read(&tty->termios_rwsem);
-
- timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
- timeout);
-
- down_read(&tty->termios_rwsem);
continue;
}
}
@@ -2292,21 +2281,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf,
nr--;
}
- /*
- * Copy data, and if there is more to be had
- * and we have nothing more to wait for, then
- * let's mark us for retries.
- *
- * NOTE! We return here with both the termios_sem
- * and atomic_read_lock still held, the retries
- * will release them when done.
- */
- if (copy_from_read_buf(tty, &kb, &nr) && kb - kbuf >= minimum) {
-more_to_be_read:
- remove_wait_queue(&tty->read_wait, &wait);
- *cookie = cookie;
- return kb - kbuf;
- }
+ if (copy_from_read_buf(tty, &kb, &nr) && kb - kbuf >= minimum)
+ goto more_to_be_read;
}
n_tty_check_unthrottle(tty);
@@ -2333,6 +2309,18 @@ more_to_be_read:
retval = kb - kbuf;
return retval;
+more_to_be_read:
+ /*
+ * There is more to be had and we have nothing more to wait for, so
+ * let's mark us for retries.
+ *
+ * NOTE! We return here with both the termios_sem and atomic_read_lock
+ * still held, the retries will release them when done.
+ */
+ remove_wait_queue(&tty->read_wait, &wait);
+ *cookie = cookie;
+
+ return kb - kbuf;
}
/**
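The n_tty_read() rework above splits the cookie continuation into n_tty_continue_cookie() and the blocking checks into n_tty_wait_for_input(), whose return value tells the caller what to do: a negative errno or 0 means store it and stop, 1 means the task was woken and should re-check for input. A toy model of that calling convention in standalone C (the names and struct are made up for illustration):

    #include <errno.h>
    #include <stdbool.h>

    /* Stand-ins for the tty state n_tty_wait_for_input() consults. */
    struct reader_state {
        bool other_closed;
        bool nonblock;
        long timeout;
    };

    /* <= 0: caller stores the value and stops; 1: woken up, re-check for input. */
    static int wait_for_input(struct reader_state *st)
    {
        if (st->other_closed)
            return -EIO;
        if (!st->timeout)
            return 0;
        if (st->nonblock)
            return -EAGAIN;

        /* ...block until woken, updating st->timeout... */
        return 1;
    }

    static long read_loop(struct reader_state *st, bool (*input_available)(void))
    {
        long retval = 0;

        while (!input_available()) {
            int ret = wait_for_input(st);

            if (ret <= 0) {
                retval = ret;
                break;
            }
        }
        return retval;
    }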
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index ebf0bbc2cff2..eb2a2e58fe78 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -316,17 +316,6 @@ void serdev_device_write_flush(struct serdev_device *serdev)
}
EXPORT_SYMBOL_GPL(serdev_device_write_flush);
-int serdev_device_write_room(struct serdev_device *serdev)
-{
- struct serdev_controller *ctrl = serdev->ctrl;
-
- if (!ctrl || !ctrl->ops->write_room)
- return 0;
-
- return serdev->ctrl->ops->write_room(ctrl);
-}
-EXPORT_SYMBOL_GPL(serdev_device_write_room);
-
unsigned int serdev_device_set_baudrate(struct serdev_device *serdev, unsigned int speed)
{
struct serdev_controller *ctrl = serdev->ctrl;
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index 3d7ae7fa5018..bab1b143b8a6 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -92,14 +92,6 @@ static void ttyport_write_flush(struct serdev_controller *ctrl)
tty_driver_flush_buffer(tty);
}
-static int ttyport_write_room(struct serdev_controller *ctrl)
-{
- struct serport *serport = serdev_controller_get_drvdata(ctrl);
- struct tty_struct *tty = serport->tty;
-
- return tty_write_room(tty);
-}
-
static int ttyport_open(struct serdev_controller *ctrl)
{
struct serport *serport = serdev_controller_get_drvdata(ctrl);
@@ -259,7 +251,6 @@ static int ttyport_break_ctl(struct serdev_controller *ctrl, unsigned int break_
static const struct serdev_controller_ops ctrl_ops = {
.write_buf = ttyport_write_buf,
.write_flush = ttyport_write_flush,
- .write_room = ttyport_write_room,
.open = ttyport_open,
.close = ttyport_close,
.set_flow_control = ttyport_set_flow_control,
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index f245a84f4a50..bdd26c9f34bd 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -162,7 +162,7 @@ void serial8250_tx_dma_flush(struct uart_8250_port *p)
*/
dma->tx_size = 0;
- dmaengine_terminate_async(dma->rxchan);
+ dmaengine_terminate_async(dma->txchan);
}
int serial8250_rx_dma(struct uart_8250_port *p)
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 6afcf27db3b8..1902f29444a1 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -107,11 +107,23 @@ static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
return value;
}
+/*
+ * This function is being called as part of the uart_port::serial_out()
+ * routine. Hence, it must not call serial_port_out() or serial_out()
+ * against the modified registers here, i.e. LCR.
+ */
static void dw8250_force_idle(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
unsigned int lsr;
+ /*
+ * The following call currently performs serial_out()
+	 * against the FCR register. Because it differs from LCR
+ * there will be no infinite loop, but if it ever gets
+ * modified, we might need a new custom version of it
+ * that avoids infinite recursion.
+ */
serial8250_clear_and_reinit_fifos(up);
/*
@@ -120,14 +132,19 @@ static void dw8250_force_idle(struct uart_port *p)
* enabled.
*/
if (up->fcr & UART_FCR_ENABLE_FIFO) {
- lsr = p->serial_in(p, UART_LSR);
+ lsr = serial_port_in(p, UART_LSR);
if (!(lsr & UART_LSR_DR))
return;
}
- (void)p->serial_in(p, UART_RX);
+ serial_port_in(p, UART_RX);
}
+/*
+ * This function is being called as part of the uart_port::serial_out()
+ * routine. Hence, it must not call serial_port_out() or serial_out()
+ * against the modified registers here, i.e. LCR.
+ */
static void dw8250_check_lcr(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = to_dw8250_data(p->private_data);
@@ -139,7 +156,7 @@ static void dw8250_check_lcr(struct uart_port *p, int offset, int value)
/* Make sure LCR write wasn't ignored */
while (tries--) {
- unsigned int lcr = p->serial_in(p, offset);
+ unsigned int lcr = serial_port_in(p, offset);
if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
return;
@@ -260,7 +277,7 @@ static int dw8250_handle_irq(struct uart_port *p)
{
struct uart_8250_port *up = up_to_u8250p(p);
struct dw8250_data *d = to_dw8250_data(p->private_data);
- unsigned int iir = p->serial_in(p, UART_IIR);
+ unsigned int iir = serial_port_in(p, UART_IIR);
bool rx_timeout = (iir & 0x3f) == UART_IIR_RX_TIMEOUT;
unsigned int quirks = d->pdata->quirks;
unsigned int status;
@@ -281,7 +298,7 @@ static int dw8250_handle_irq(struct uart_port *p)
status = serial_lsr_in(up);
if (!(status & (UART_LSR_DR | UART_LSR_BI)))
- (void) p->serial_in(p, UART_RX);
+ serial_port_in(p, UART_RX);
uart_port_unlock_irqrestore(p, flags);
}
@@ -303,7 +320,7 @@ static int dw8250_handle_irq(struct uart_port *p)
if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
/* Clear the USR */
- (void)p->serial_in(p, d->pdata->usr_reg);
+ serial_port_in(p, d->pdata->usr_reg);
return 1;
}
@@ -390,7 +407,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
{
struct uart_8250_port *up = up_to_u8250p(p);
- unsigned int mcr = p->serial_in(p, UART_MCR);
+ unsigned int mcr = serial_port_in(p, UART_MCR);
if (up->capabilities & UART_CAP_IRDA) {
if (termios->c_line == N_IRDA)
@@ -398,7 +415,7 @@ static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
else
mcr &= ~DW_UART_MCR_SIRE;
- p->serial_out(p, UART_MCR, mcr);
+ serial_port_out(p, UART_MCR, mcr);
}
serial8250_do_set_ldisc(p, termios);
}
@@ -421,6 +438,18 @@ static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
return param == chan->device->dev;
}
+static void dw8250_setup_dma_filter(struct uart_port *p, struct dw8250_data *data)
+{
+ /* Platforms with iDMA 64-bit */
+ if (platform_get_resource_byname(to_platform_device(p->dev), IORESOURCE_MEM, "lpss_priv")) {
+ data->data.dma.rx_param = p->dev->parent;
+ data->data.dma.tx_param = p->dev->parent;
+ data->data.dma.fn = dw8250_idma_filter;
+ } else {
+ data->data.dma.fn = dw8250_fallback_dma_filter;
+ }
+}
+
static u32 dw8250_rzn1_get_dmacr_burst(int max_burst)
{
if (max_burst >= 8)
@@ -459,8 +488,8 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
- unsigned int quirks = data->pdata ? data->pdata->quirks : 0;
- u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0;
+ unsigned int quirks = data->pdata->quirks;
+ u32 cpr_value = data->pdata->cpr_value;
if (quirks & DW_UART_QUIRK_CPR_VALUE)
data->data.cpr_value = cpr_value;
@@ -491,14 +520,6 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
p->serial_in = dw8250_serial_in32;
data->uart_16550_compatible = true;
}
-
- /* Platforms with iDMA 64-bit */
- if (platform_get_resource_byname(to_platform_device(p->dev),
- IORESOURCE_MEM, "lpss_priv")) {
- data->data.dma.rx_param = p->dev->parent;
- data->data.dma.tx_param = p->dev->parent;
- data->data.dma.fn = dw8250_idma_filter;
- }
}
static void dw8250_reset_control_assert(void *data)
@@ -520,7 +541,6 @@ static int dw8250_probe(struct platform_device *pdev)
return dev_err_probe(dev, -EINVAL, "no registers defined\n");
spin_lock_init(&p->lock);
- p->handle_irq = dw8250_handle_irq;
p->pm = dw8250_do_pm;
p->type = PORT_8250;
p->flags = UPF_FIXED_PORT;
@@ -532,13 +552,8 @@ static int dw8250_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- data->data.dma.fn = dw8250_fallback_dma_filter;
- data->pdata = device_get_match_data(p->dev);
p->private_data = &data->data;
- data->uart_16550_compatible = device_property_read_bool(dev,
- "snps,uart-16550-compatible");
-
p->mapbase = regs->start;
p->mapsize = resource_size(regs);
@@ -626,11 +641,19 @@ static int dw8250_probe(struct platform_device *pdev)
if (err)
return err;
- dw8250_quirks(p, data);
+ data->uart_16550_compatible = device_property_read_bool(dev, "snps,uart-16550-compatible");
+
+ data->pdata = device_get_match_data(p->dev);
+ if (data->pdata)
+ dw8250_quirks(p, data);
/* If the Busy Functionality is not implemented, don't handle it */
if (data->uart_16550_compatible)
p->handle_irq = NULL;
+ else if (data->pdata)
+ p->handle_irq = dw8250_handle_irq;
+
+ dw8250_setup_dma_filter(p, data);
if (!data->skip_autocfg)
dw8250_setup_port(p);
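Several hunks above, like the 8250_fsl.c and 8250_omap.c ones further down, replace direct p->serial_in()/p->serial_out() calls with the serial_port_in()/serial_port_out() wrappers, and the new comments spell out why dw8250_force_idle() and dw8250_check_lcr() must not write LCR through the normal path: they already run from inside the port's serial_out() hook, so doing so would re-enter it. A stripped-down model of that recursion hazard (illustrative only, simplified types):

    #define UART_LCR 3

    struct port;
    typedef void (*serial_out_fn)(struct port *p, int offset, int value);

    struct port {
        serial_out_fn serial_out;
        unsigned char regs[8];
    };

    /* The generic wrapper: always dispatches through the driver hook. */
    static void serial_port_out(struct port *p, int offset, int value)
    {
        p->serial_out(p, offset, value);
    }

    /* Raw register write that bypasses the hook. */
    static void raw_write(struct port *p, int offset, int value)
    {
        p->regs[offset] = (unsigned char)value;
    }

    /* Runs from inside the serial_out hook after an ignored LCR write. */
    static void check_lcr(struct port *p, int value)
    {
        /*
         * Calling serial_port_out(p, UART_LCR, value) here would re-enter
         * dw_serial_out() and recurse; retry with the raw accessor instead.
         */
        raw_write(p, UART_LCR, value);
    }

    static void dw_serial_out(struct port *p, int offset, int value)
    {
        raw_write(p, offset, value);
        if (offset == UART_LCR)
            check_lcr(p, value);
    }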
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 1b7bd55619c6..649ae5c8304d 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -32,7 +32,7 @@ int fsl8250_handle_irq(struct uart_port *port)
uart_port_lock_irqsave(&up->port, &flags);
- iir = port->serial_in(port, UART_IIR);
+ iir = serial_port_in(port, UART_IIR);
if (iir & UART_IIR_NO_INT) {
uart_port_unlock_irqrestore(&up->port, flags);
return 0;
@@ -54,12 +54,12 @@ int fsl8250_handle_irq(struct uart_port *port)
if (unlikely((iir & UART_IIR_ID) == UART_IIR_RLSI &&
(up->lsr_saved_flags & UART_LSR_BI))) {
up->lsr_saved_flags &= ~UART_LSR_BI;
- port->serial_in(port, UART_RX);
+ serial_port_in(port, UART_RX);
uart_port_unlock_irqrestore(&up->port, flags);
return 1;
}
- lsr = orig_lsr = up->port.serial_in(&up->port, UART_LSR);
+ lsr = orig_lsr = serial_port_in(port, UART_LSR);
/* Process incoming characters first */
if ((lsr & (UART_LSR_DR | UART_LSR_BI)) &&
@@ -71,7 +71,7 @@ int fsl8250_handle_irq(struct uart_port *port)
if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) {
unsigned long delay;
- up->ier = port->serial_in(port, UART_IER);
+ up->ier = serial_port_in(port, UART_IER);
if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
port->ops->stop_rx(port);
} else {
diff --git a/drivers/tty/serial/8250/8250_ni.c b/drivers/tty/serial/8250/8250_ni.c
new file mode 100644
index 000000000000..b10a42d2ad63
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_ni.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NI 16550 UART Driver
+ *
+ * The National Instruments (NI) 16550 is a UART that is compatible with the
+ * TL16C550C and OX16C950B register interfaces, but has additional functions
+ * for RS-485 transceiver control. This driver implements support for the
+ * additional functionality on top of the standard serial8250 core.
+ *
+ * Copyright 2012-2023 National Instruments Corporation
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/clk.h>
+
+#include "8250.h"
+
+/* Extra bits in UART_ACR */
+#define NI16550_ACR_AUTO_DTR_EN BIT(4)
+
+/* TFS - TX FIFO Size */
+#define NI16550_TFS_OFFSET 0x0C
+/* RFS - RX FIFO Size */
+#define NI16550_RFS_OFFSET 0x0D
+
+/* PMR - Port Mode Register */
+#define NI16550_PMR_OFFSET 0x0E
+/* PMR[1:0] - Port Capabilities */
+#define NI16550_PMR_CAP_MASK GENMASK(1, 0)
+#define NI16550_PMR_NOT_IMPL FIELD_PREP(NI16550_PMR_CAP_MASK, 0) /* not implemented */
+#define NI16550_PMR_CAP_RS232 FIELD_PREP(NI16550_PMR_CAP_MASK, 1) /* RS-232 capable */
+#define NI16550_PMR_CAP_RS485 FIELD_PREP(NI16550_PMR_CAP_MASK, 2) /* RS-485 capable */
+#define NI16550_PMR_CAP_DUAL FIELD_PREP(NI16550_PMR_CAP_MASK, 3) /* dual-port */
+/* PMR[4] - Interface Mode */
+#define NI16550_PMR_MODE_MASK GENMASK(4, 4)
+#define NI16550_PMR_MODE_RS232 FIELD_PREP(NI16550_PMR_MODE_MASK, 0) /* currently 232 */
+#define NI16550_PMR_MODE_RS485 FIELD_PREP(NI16550_PMR_MODE_MASK, 1) /* currently 485 */
+
+/* PCR - Port Control Register */
+/*
+ * Wire Mode | Tx enabled? | Rx enabled?
+ * ---------------|----------------------|--------------------------
+ * PCR_RS422 | Always | Always
+ * PCR_ECHO_RS485 | When DTR asserted | Always
+ * PCR_DTR_RS485 | When DTR asserted | Disabled when TX enabled
+ * PCR_AUTO_RS485 | When data in TX FIFO | Disabled when TX enabled
+ */
+#define NI16550_PCR_OFFSET 0x0F
+#define NI16550_PCR_WIRE_MODE_MASK GENMASK(1, 0)
+#define NI16550_PCR_RS422 FIELD_PREP(NI16550_PCR_WIRE_MODE_MASK, 0)
+#define NI16550_PCR_ECHO_RS485 FIELD_PREP(NI16550_PCR_WIRE_MODE_MASK, 1)
+#define NI16550_PCR_DTR_RS485 FIELD_PREP(NI16550_PCR_WIRE_MODE_MASK, 2)
+#define NI16550_PCR_AUTO_RS485 FIELD_PREP(NI16550_PCR_WIRE_MODE_MASK, 3)
+#define NI16550_PCR_TXVR_ENABLE_BIT BIT(3)
+#define NI16550_PCR_RS485_TERMINATION_BIT BIT(6)
+
+/* flags for ni16550_device_info */
+#define NI_HAS_PMR BIT(0)
+
+struct ni16550_device_info {
+ u32 uartclk;
+ u8 prescaler;
+ u8 flags;
+};
+
+struct ni16550_data {
+ int line;
+ struct clk *clk;
+};
+
+static int ni16550_enable_transceivers(struct uart_port *port)
+{
+ u8 pcr;
+
+ pcr = port->serial_in(port, NI16550_PCR_OFFSET);
+ pcr |= NI16550_PCR_TXVR_ENABLE_BIT;
+ dev_dbg(port->dev, "enable transceivers: write pcr: 0x%02x\n", pcr);
+ port->serial_out(port, NI16550_PCR_OFFSET, pcr);
+
+ return 0;
+}
+
+static int ni16550_disable_transceivers(struct uart_port *port)
+{
+ u8 pcr;
+
+ pcr = port->serial_in(port, NI16550_PCR_OFFSET);
+ pcr &= ~NI16550_PCR_TXVR_ENABLE_BIT;
+ dev_dbg(port->dev, "disable transceivers: write pcr: 0x%02x\n", pcr);
+ port->serial_out(port, NI16550_PCR_OFFSET, pcr);
+
+ return 0;
+}
+
+static int ni16550_rs485_config(struct uart_port *port,
+ struct ktermios *termios,
+ struct serial_rs485 *rs485)
+{
+ struct uart_8250_port *up = container_of(port, struct uart_8250_port, port);
+ u8 pcr;
+
+ pcr = serial_in(up, NI16550_PCR_OFFSET);
+ pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
+
+ if ((rs485->flags & SER_RS485_MODE_RS422) ||
+ !(rs485->flags & SER_RS485_ENABLED)) {
+ /* RS-422 */
+ pcr |= NI16550_PCR_RS422;
+ up->acr &= ~NI16550_ACR_AUTO_DTR_EN;
+ } else {
+ /* RS-485 2-wire Auto */
+ pcr |= NI16550_PCR_AUTO_RS485;
+ up->acr |= NI16550_ACR_AUTO_DTR_EN;
+ }
+
+ dev_dbg(port->dev, "config rs485: write pcr: 0x%02x, acr: %02x\n", pcr, up->acr);
+ serial_out(up, NI16550_PCR_OFFSET, pcr);
+ serial_icr_write(up, UART_ACR, up->acr);
+
+ return 0;
+}
+
+static bool is_pmr_rs232_mode(struct uart_8250_port *up)
+{
+ u8 pmr = serial_in(up, NI16550_PMR_OFFSET);
+ u8 pmr_mode = pmr & NI16550_PMR_MODE_MASK;
+ u8 pmr_cap = pmr & NI16550_PMR_CAP_MASK;
+
+ /*
+ * If the PMR is not implemented, then by default NI UARTs are
+ * connected to RS-485 transceivers
+ */
+ if (pmr_cap == NI16550_PMR_NOT_IMPL)
+ return false;
+
+ if (pmr_cap == NI16550_PMR_CAP_DUAL)
+ /*
+ * If the port is dual-mode capable, then read the mode bit
+ * to know the current mode
+ */
+ return pmr_mode == NI16550_PMR_MODE_RS232;
+ /*
+ * If it is not dual-mode capable, then decide based on the
+ * capability
+ */
+ return pmr_cap == NI16550_PMR_CAP_RS232;
+}
+
+static void ni16550_config_prescaler(struct uart_8250_port *up,
+ u8 prescaler)
+{
+ /*
+ * Page in the Enhanced Mode Registers
+ * Sets EFR[4] for Enhanced Mode.
+ */
+ u8 lcr_value;
+ u8 efr_value;
+
+ lcr_value = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ efr_value = serial_in(up, UART_EFR);
+ efr_value |= UART_EFR_ECB;
+
+ serial_out(up, UART_EFR, efr_value);
+
+ /* Page out the Enhanced Mode Registers */
+ serial_out(up, UART_LCR, lcr_value);
+
+ /* Set prescaler to CPR register. */
+ serial_out(up, UART_SCR, UART_CPR);
+ serial_out(up, UART_ICR, prescaler);
+}
+
+static const struct serial_rs485 ni16550_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_MODE_RS422 | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND,
+ /*
+ * delay_rts_* and RX_DURING_TX are not supported.
+ *
+ * RTS_{ON,AFTER}_SEND are supported, but ignored; the transceiver
+ * is connected in only one way and we don't need userspace to tell
+ * us, but want to retain compatibility with applications that do.
+ */
+};
+
+static void ni16550_rs485_setup(struct uart_port *port)
+{
+ port->rs485_config = ni16550_rs485_config;
+ port->rs485_supported = ni16550_rs485_supported;
+ /*
+ * The hardware comes up by default in 2-wire auto mode and we
+ * set the flags to represent that
+ */
+ port->rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
+}
+
+static int ni16550_port_startup(struct uart_port *port)
+{
+ int ret;
+
+ ret = serial8250_do_startup(port);
+ if (ret)
+ return ret;
+
+ return ni16550_enable_transceivers(port);
+}
+
+static void ni16550_port_shutdown(struct uart_port *port)
+{
+ ni16550_disable_transceivers(port);
+
+ serial8250_do_shutdown(port);
+}
+
+static int ni16550_get_regs(struct platform_device *pdev,
+ struct uart_port *port)
+{
+ struct resource *regs;
+
+ regs = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (regs) {
+ port->iotype = UPIO_PORT;
+ port->iobase = regs->start;
+
+ return 0;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (regs) {
+ port->iotype = UPIO_MEM;
+ port->mapbase = regs->start;
+ port->mapsize = resource_size(regs);
+ port->flags |= UPF_IOREMAP;
+
+ port->membase = devm_ioremap(&pdev->dev, port->mapbase,
+ port->mapsize);
+ if (!port->membase)
+ return -ENOMEM;
+
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "no registers defined\n");
+ return -EINVAL;
+}
+
+/*
+ * Very old implementations don't have the TFS or RFS registers
+ * defined, so we may read all-0s or all-1s. For such devices,
+ * assume a FIFO size of 128.
+ */
+static u8 ni16550_read_fifo_size(struct uart_8250_port *uart, int reg)
+{
+ u8 value = serial_in(uart, reg);
+
+ if (value == 0x00 || value == 0xFF)
+ return 128;
+
+ return value;
+}
+
+static void ni16550_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ up->mcr |= UART_MCR_CLKSEL;
+ serial8250_do_set_mctrl(port, mctrl);
+}
+
+static int ni16550_probe(struct platform_device *pdev)
+{
+ const struct ni16550_device_info *info;
+ struct device *dev = &pdev->dev;
+ struct uart_8250_port uart = {};
+ unsigned int txfifosz, rxfifosz;
+ unsigned int prescaler = 0;
+ struct ni16550_data *data;
+ const char *portmode;
+ bool rs232_property;
+ int ret;
+ int irq;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&uart.port.lock);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = ni16550_get_regs(pdev, &uart.port);
+ if (ret < 0)
+ return ret;
+
+ /* early setup so that serial_in()/serial_out() work */
+ serial8250_set_defaults(&uart);
+
+ info = device_get_match_data(dev);
+
+ uart.port.dev = dev;
+ uart.port.irq = irq;
+ uart.port.irqflags = IRQF_SHARED;
+ uart.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF
+ | UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ uart.port.startup = ni16550_port_startup;
+ uart.port.shutdown = ni16550_port_shutdown;
+
+ /*
+	 * The FIFO sizes set at hardware instantiation are held in registers.
+ */
+ txfifosz = ni16550_read_fifo_size(&uart, NI16550_TFS_OFFSET);
+ rxfifosz = ni16550_read_fifo_size(&uart, NI16550_RFS_OFFSET);
+
+ dev_dbg(dev, "NI 16550 has TX FIFO size %u, RX FIFO size %u\n",
+ txfifosz, rxfifosz);
+
+ uart.port.type = PORT_16550A;
+ uart.port.fifosize = txfifosz;
+ uart.tx_loadsz = txfifosz;
+ uart.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
+ uart.capabilities = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR;
+
+ /*
+ * Declaration of the base clock frequency can come from one of:
+ * - static declaration in this driver (for older ACPI IDs)
+	 * - a "clock-frequency" ACPI property
+ */
+ if (info->uartclk)
+ uart.port.uartclk = info->uartclk;
+ if (device_property_read_u32(dev, "clock-frequency",
+ &uart.port.uartclk)) {
+ data->clk = devm_clk_get_enabled(dev, NULL);
+ if (!IS_ERR(data->clk))
+ uart.port.uartclk = clk_get_rate(data->clk);
+ }
+
+ if (!uart.port.uartclk) {
+ dev_err(dev, "unable to determine clock frequency!\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (info->prescaler)
+ prescaler = info->prescaler;
+ device_property_read_u32(dev, "clock-prescaler", &prescaler);
+
+ if (prescaler != 0) {
+ uart.port.set_mctrl = ni16550_set_mctrl;
+ ni16550_config_prescaler(&uart, (u8)prescaler);
+ }
+
+ /*
+ * The determination of whether or not this is an RS-485 or RS-232 port
+ * can come from the PMR (if present), otherwise we're solely an RS-485
+ * port.
+ *
+ * This is a device-specific property, and there are old devices in the
+ * field using "transceiver" as an ACPI property, so we have to check
+ * for that as well.
+ */
+ if (!device_property_read_string(dev, "transceiver", &portmode)) {
+ rs232_property = strncmp(portmode, "RS-232", 6) == 0;
+
+ dev_dbg(dev, "port is in %s mode (via device property)\n",
+ rs232_property ? "RS-232" : "RS-485");
+ } else if (info->flags & NI_HAS_PMR) {
+ rs232_property = is_pmr_rs232_mode(&uart);
+
+ dev_dbg(dev, "port is in %s mode (via PMR)\n",
+ rs232_property ? "RS-232" : "RS-485");
+ } else {
+ rs232_property = 0;
+
+ dev_dbg(dev, "port is fixed as RS-485\n");
+ }
+
+ if (!rs232_property) {
+ /*
+ * Neither the 'transceiver' property nor the PMR indicate
+ * that this is an RS-232 port, so it must be an RS-485 one.
+ */
+ ni16550_rs485_setup(&uart.port);
+ }
+
+ ret = serial8250_register_8250_port(&uart);
+ if (ret < 0)
+ goto err;
+ data->line = ret;
+
+ platform_set_drvdata(pdev, data);
+ return 0;
+
+err:
+ return ret;
+}
+
+static void ni16550_remove(struct platform_device *pdev)
+{
+ struct ni16550_data *data = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(data->line);
+}
+
+#ifdef CONFIG_ACPI
+/* NI 16550 RS-485 Interface */
+static const struct ni16550_device_info nic7750 = {
+ .uartclk = 33333333,
+};
+
+/* NI CVS-145x RS-485 Interface */
+static const struct ni16550_device_info nic7772 = {
+ .uartclk = 1843200,
+ .flags = NI_HAS_PMR,
+};
+
+/* NI cRIO-904x RS-485 Interface */
+static const struct ni16550_device_info nic792b = {
+ /* Sets UART clock rate to 22.222 MHz with 1.125 prescale */
+ .uartclk = 22222222,
+ .prescaler = 0x09,
+};
+
+/* NI sbRIO 96x8 RS-232/485 Interfaces */
+static const struct ni16550_device_info nic7a69 = {
+ /* Set UART clock rate to 29.629 MHz with 1.125 prescale */
+ .uartclk = 29629629,
+ .prescaler = 0x09,
+};
+static const struct acpi_device_id ni16550_acpi_match[] = {
+ { "NIC7750", (kernel_ulong_t)&nic7750 },
+ { "NIC7772", (kernel_ulong_t)&nic7772 },
+ { "NIC792B", (kernel_ulong_t)&nic792b },
+ { "NIC7A69", (kernel_ulong_t)&nic7a69 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ni16550_acpi_match);
+#endif
+
+static struct platform_driver ni16550_driver = {
+ .driver = {
+ .name = "ni16550",
+ .acpi_match_table = ACPI_PTR(ni16550_acpi_match),
+ },
+ .probe = ni16550_probe,
+ .remove = ni16550_remove,
+};
+
+module_platform_driver(ni16550_driver);
+
+MODULE_AUTHOR("Emerson Electric Co.");
+MODULE_DESCRIPTION("NI 16550 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index c2b75e3f106d..2a0ce11f405d 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -692,7 +692,7 @@ static irqreturn_t omap8250_irq(int irq, void *dev_id)
/* Synchronize UART_IER access against the console. */
uart_port_lock(port);
- up->ier = port->serial_in(port, UART_IER);
+ up->ier = serial_port_in(port, UART_IER);
if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
port->ops->stop_rx(port);
} else {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index df4d0d832e54..73c200127b08 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -2728,6 +2728,22 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.setup = pci_oxsemi_tornado_setup,
},
{
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4026,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTASHIELD,
+ .device = 0x4021,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = pci_oxsemi_tornado_init,
+ .setup = pci_oxsemi_tornado_setup,
+ },
+ {
.vendor = PCI_VENDOR_ID_INTEL,
.device = 0x8811,
.subvendor = PCI_ANY_ID,
@@ -5253,6 +5269,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0BA2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0BA3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
/*
* Brainboxes UC-235/246
*/
@@ -5373,6 +5397,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C42,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C43,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
/*
* Brainboxes UC-420
*/
@@ -5599,6 +5631,20 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes XC-235
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4026,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
+ /*
+ * Brainboxes XC-475
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x4021,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_oxsemi_1_15625000 },
/*
* Perle PCI-RAS cards
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index c57f44882abb..8ac452cea36c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1678,7 +1678,7 @@ static void serial8250_disable_ms(struct uart_port *port)
if (up->bugs & UART_BUG_NOMSR)
return;
- mctrl_gpio_disable_ms(up->gpios);
+ mctrl_gpio_disable_ms_no_sync(up->gpios);
up->ier &= ~UART_IER_MSI;
serial_port_out(port, UART_IER, up->ier);
@@ -2406,28 +2406,26 @@ int serial8250_do_startup(struct uart_port *port)
* test if we receive TX irq. This way, we'll never enable
* UART_BUG_TXEN.
*/
- if (up->port.quirks & UPQ_NO_TXEN_TEST)
- goto dont_test_tx_en;
-
- /*
- * Do a quick test to see if we receive an interrupt when we enable
- * the TX irq.
- */
- serial_port_out(port, UART_IER, UART_IER_THRI);
- lsr = serial_port_in(port, UART_LSR);
- iir = serial_port_in(port, UART_IIR);
- serial_port_out(port, UART_IER, 0);
+ if (!(up->port.quirks & UPQ_NO_TXEN_TEST)) {
+ /*
+ * Do a quick test to see if we receive an interrupt when we
+ * enable the TX irq.
+ */
+ serial_port_out(port, UART_IER, UART_IER_THRI);
+ lsr = serial_port_in(port, UART_LSR);
+ iir = serial_port_in(port, UART_IIR);
+ serial_port_out(port, UART_IER, 0);
- if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
- if (!(up->bugs & UART_BUG_TXEN)) {
- up->bugs |= UART_BUG_TXEN;
- dev_dbg(port->dev, "enabling bad tx status workarounds\n");
+ if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
+ if (!(up->bugs & UART_BUG_TXEN)) {
+ up->bugs |= UART_BUG_TXEN;
+ dev_dbg(port->dev, "enabling bad tx status workarounds\n");
+ }
+ } else {
+ up->bugs &= ~UART_BUG_TXEN;
}
- } else {
- up->bugs &= ~UART_BUG_TXEN;
}
-dont_test_tx_en:
uart_port_unlock_irqrestore(port, flags);
/*
@@ -2968,7 +2966,6 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
{
unsigned int size = serial8250_port_size(up);
struct uart_port *port = &up->port;
- int ret = 0;
switch (port->iotype) {
case UPIO_AU:
@@ -2977,32 +2974,28 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
case UPIO_MEM32BE:
case UPIO_MEM16:
case UPIO_MEM:
- if (!port->mapbase) {
- ret = -EINVAL;
- break;
- }
+ if (!port->mapbase)
+ return -EINVAL;
- if (!request_mem_region(port->mapbase, size, "serial")) {
- ret = -EBUSY;
- break;
- }
+ if (!request_mem_region(port->mapbase, size, "serial"))
+ return -EBUSY;
if (port->flags & UPF_IOREMAP) {
port->membase = ioremap(port->mapbase, size);
if (!port->membase) {
release_mem_region(port->mapbase, size);
- ret = -ENOMEM;
+ return -ENOMEM;
}
}
- break;
-
+ return 0;
case UPIO_HUB6:
case UPIO_PORT:
if (!request_region(port->iobase, size, "serial"))
- ret = -EBUSY;
- break;
+ return -EBUSY;
+ return 0;
}
- return ret;
+
+ return 0;
}
static void serial8250_release_std_resource(struct uart_8250_port *up)
diff --git a/drivers/tty/serial/8250/8250_rsa.c b/drivers/tty/serial/8250/8250_rsa.c
index dfaa613e452d..82f2593b4c59 100644
--- a/drivers/tty/serial/8250/8250_rsa.c
+++ b/drivers/tty/serial/8250/8250_rsa.c
@@ -16,30 +16,27 @@ static unsigned int probe_rsa_count;
static int rsa8250_request_resource(struct uart_8250_port *up)
{
- unsigned long start = UART_RSA_BASE << up->port.regshift;
- unsigned int size = 8 << up->port.regshift;
struct uart_port *port = &up->port;
- int ret = -EINVAL;
+ unsigned long start = UART_RSA_BASE << port->regshift;
+ unsigned int size = 8 << port->regshift;
switch (port->iotype) {
case UPIO_HUB6:
case UPIO_PORT:
start += port->iobase;
- if (request_region(start, size, "serial-rsa"))
- ret = 0;
- else
- ret = -EBUSY;
- break;
+ if (!request_region(start, size, "serial-rsa"))
+ return -EBUSY;
+ return 0;
+ default:
+ return -EINVAL;
}
-
- return ret;
}
static void rsa8250_release_resource(struct uart_8250_port *up)
{
- unsigned long offset = UART_RSA_BASE << up->port.regshift;
- unsigned int size = 8 << up->port.regshift;
struct uart_port *port = &up->port;
+ unsigned long offset = UART_RSA_BASE << port->regshift;
+ unsigned int size = 8 << port->regshift;
switch (port->iotype) {
case UPIO_HUB6:
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 55d26d16df9b..bd3d636ff962 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -569,6 +569,19 @@ config SERIAL_8250_BCM7271
including DMA support and high accuracy BAUD rates, say
Y to this option. If unsure, say N.
+config SERIAL_8250_NI
+ tristate "NI 16550 based serial port"
+ depends on SERIAL_8250
+ depends on (X86 && ACPI) || COMPILE_TEST
+ help
+ This driver supports the integrated serial ports on National
+ Instruments (NI) controller hardware. This is required for all NI
+ controller models with onboard RS-485 or dual-mode RS-485/RS-232
+ ports.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_ni.
+
config SERIAL_OF_PLATFORM
tristate "Devicetree based probing for 8250 ports"
depends on SERIAL_8250 && OF
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 1516de629b61..b04eeda03b23 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
+obj-$(CONFIG_SERIAL_8250_NI) += 8250_ni.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o
obj-$(CONFIG_SERIAL_8250_PARISC) += 8250_parisc.o
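
A minimal configuration fragment exercising the new option (names taken from
the Kconfig and Makefile hunks above; an x86/ACPI kernel is assumed, per the
stated dependencies) would build the driver as the 8250_ni module:

    CONFIG_ACPI=y
    CONFIG_SERIAL_8250=y
    CONFIG_SERIAL_8250_NI=m
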
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 976dae3bb1bb..79a8186d3361 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -179,25 +179,6 @@ config SERIAL_ATMEL_TTYAT
Say Y if you have an external 8250/16C550 UART. If unsure, say N.
-config SERIAL_KGDB_NMI
- bool "Serial console over KGDB NMI debugger port"
- depends on KGDB_SERIAL_CONSOLE
- help
- This special driver allows you to temporary use NMI debugger port
- as a normal console (assuming that the port is attached to KGDB).
-
- Unlike KDB's disable_nmi command, with this driver you are always
- able to go back to the debugger using KGDB escape sequence ($3#33).
- This is because this console driver processes the input in NMI
- context, and thus is able to intercept the magic sequence.
-
- Note that since the console interprets input and uses polling
- communication methods, for things like PPP you still must fully
- detach debugger port from the KGDB NMI (i.e. disable_nmi), and
- use raw console.
-
- If unsure, say N.
-
config SERIAL_MESON
tristate "Meson serial port support"
depends on ARCH_MESON || COMPILE_TEST
@@ -306,6 +287,29 @@ config SERIAL_TEGRA_TCU_CONSOLE
If unsure, say Y.
+config SERIAL_TEGRA_UTC
+ tristate "NVIDIA Tegra UART Trace Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ select SERIAL_CORE
+ help
+ Support for Tegra UTC (UART Trace Controller) client serial ports.
+
+ UTC is a hardware-based serial port that multiplexes the data streams
+ of up to 16 UTC clients onto a single physical serial port.
+
+config SERIAL_TEGRA_UTC_CONSOLE
+ bool "Support for console on a Tegra UTC serial port"
+ depends on SERIAL_TEGRA_UTC
+ select SERIAL_CORE_CONSOLE
+ default SERIAL_TEGRA_UTC
+ help
+ If you say Y here, it will be possible to use a Tegra UTC client as
+ the system console (the system console is the device which receives
+ all kernel messages and warnings and which allows logins in single
+ user mode).
+
+ If unsure, say Y.
+
config SERIAL_MAX3100
tristate "MAX3100/3110/3111/3222 support"
depends on SPI
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 6ff74f0a9530..d58d9f719889 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_SERIAL_STM32) += stm32-usart.o
obj-$(CONFIG_SERIAL_SUNPLUS) += sunplus-uart.o
obj-$(CONFIG_SERIAL_TEGRA) += serial-tegra.o
obj-$(CONFIG_SERIAL_TEGRA_TCU) += tegra-tcu.o
+obj-$(CONFIG_SERIAL_TEGRA_UTC) += tegra-utc.o
obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
@@ -96,6 +97,5 @@ obj-$(CONFIG_SERIAL_ZS) += zs.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
-obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_NUVOTON_MA35D1) += ma35d1_serial.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 98f178bdbcbe..dc092204b472 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -272,6 +272,7 @@ struct uart_amba_port {
enum pl011_rs485_tx_state rs485_tx_state;
struct hrtimer trigger_start_tx;
struct hrtimer trigger_stop_tx;
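+ /* Set when the last character written out was '\n'. */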
+ bool console_line_ended;
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
unsigned int dmacr; /* dma control reg */
@@ -2366,50 +2367,7 @@ static void pl011_console_putchar(struct uart_port *port, unsigned char ch)
while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
cpu_relax();
pl011_write(ch, uap, REG_DR);
-}
-
-static void
-pl011_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct uart_amba_port *uap = amba_ports[co->index];
- unsigned int old_cr = 0, new_cr;
- unsigned long flags;
- int locked = 1;
-
- clk_enable(uap->clk);
-
- if (oops_in_progress)
- locked = uart_port_trylock_irqsave(&uap->port, &flags);
- else
- uart_port_lock_irqsave(&uap->port, &flags);
-
- /*
- * First save the CR then disable the interrupts
- */
- if (!uap->vendor->always_enabled) {
- old_cr = pl011_read(uap, REG_CR);
- new_cr = old_cr & ~UART011_CR_CTSEN;
- new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
- pl011_write(new_cr, uap, REG_CR);
- }
-
- uart_console_write(&uap->port, s, count, pl011_console_putchar);
-
- /*
- * Finally, wait for transmitter to become empty and restore the
- * TCR. Allow feature register bits to be inverted to work around
- * errata.
- */
- while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
- & uap->vendor->fr_busy)
- cpu_relax();
- if (!uap->vendor->always_enabled)
- pl011_write(old_cr, uap, REG_CR);
-
- if (locked)
- uart_port_unlock_irqrestore(&uap->port, flags);
-
- clk_disable(uap->clk);
+ uap->console_line_ended = (ch == '\n');
}
static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
@@ -2472,6 +2430,8 @@ static int pl011_console_setup(struct console *co, char *options)
if (ret)
return ret;
+ uap->console_line_ended = true;
+
if (dev_get_platdata(uap->port.dev)) {
struct amba_pl011_data *plat;
@@ -2555,14 +2515,105 @@ static int pl011_console_match(struct console *co, char *name, int idx,
return -ENODEV;
}
+static void
+pl011_console_write_atomic(struct console *co, struct nbcon_write_context *wctxt)
+{
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int old_cr = 0;
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
+
+ clk_enable(uap->clk);
+
+ if (!uap->vendor->always_enabled) {
+ old_cr = pl011_read(uap, REG_CR);
+ pl011_write((old_cr & ~UART011_CR_CTSEN) | (UART01x_CR_UARTEN | UART011_CR_TXE),
+ uap, REG_CR);
+ }
+
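+ /* If the previous output ended mid-line, terminate it first. */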
+ if (!uap->console_line_ended)
+ uart_console_write(&uap->port, "\n", 1, pl011_console_putchar);
+ uart_console_write(&uap->port, wctxt->outbuf, wctxt->len, pl011_console_putchar);
+
+ while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy)
+ cpu_relax();
+
+ if (!uap->vendor->always_enabled)
+ pl011_write(old_cr, uap, REG_CR);
+
+ clk_disable(uap->clk);
+
+ nbcon_exit_unsafe(wctxt);
+}
+
+static void
+pl011_console_write_thread(struct console *co, struct nbcon_write_context *wctxt)
+{
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int old_cr = 0;
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
+
+ clk_enable(uap->clk);
+
+ if (!uap->vendor->always_enabled) {
+ old_cr = pl011_read(uap, REG_CR);
+ pl011_write((old_cr & ~UART011_CR_CTSEN) | (UART01x_CR_UARTEN | UART011_CR_TXE),
+ uap, REG_CR);
+ }
+
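+ /*
+ * Emit one character per unsafe section so that ownership of the
+ * console can be handed over between characters.
+ */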
+ if (nbcon_exit_unsafe(wctxt)) {
+ int i;
+ unsigned int len = READ_ONCE(wctxt->len);
+
+ for (i = 0; i < len; i++) {
+ if (!nbcon_enter_unsafe(wctxt))
+ break;
+ uart_console_write(&uap->port, wctxt->outbuf + i, 1, pl011_console_putchar);
+ if (!nbcon_exit_unsafe(wctxt))
+ break;
+ }
+ }
+
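+ /* Reacquire ownership, if it was lost, before draining the FIFO and restoring CR. */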
+ while (!nbcon_enter_unsafe(wctxt))
+ nbcon_reacquire_nobuf(wctxt);
+
+ while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) & uap->vendor->fr_busy)
+ cpu_relax();
+
+ if (!uap->vendor->always_enabled)
+ pl011_write(old_cr, uap, REG_CR);
+
+ clk_disable(uap->clk);
+
+ nbcon_exit_unsafe(wctxt);
+}
+
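+/* Port-lock hooks used by the nbcon console core. */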
+static void
+pl011_console_device_lock(struct console *co, unsigned long *flags)
+{
+ __uart_port_lock_irqsave(&amba_ports[co->index]->port, flags);
+}
+
+static void
+pl011_console_device_unlock(struct console *co, unsigned long flags)
+{
+ __uart_port_unlock_irqrestore(&amba_ports[co->index]->port, flags);
+}
+
static struct uart_driver amba_reg;
static struct console amba_console = {
.name = "ttyAMA",
- .write = pl011_console_write,
.device = uart_console_device,
.setup = pl011_console_setup,
.match = pl011_console_match,
- .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .write_atomic = pl011_console_write_atomic,
+ .write_thread = pl011_console_write_thread,
+ .device_lock = pl011_console_device_lock,
+ .device_unlock = pl011_console_device_unlock,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_NBCON,
.index = -1,
.data = &amba_reg,
};
@@ -3000,7 +3051,7 @@ static const struct of_device_id sbsa_uart_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
-static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = {
+static const struct acpi_device_id sbsa_uart_acpi_match[] = {
{ "ARMH0011", 0 },
{ "ARMHB000", 0 },
{},
@@ -3013,8 +3064,8 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
.driver = {
.name = "sbsa-uart",
.pm = &pl011_dev_pm_ops,
- .of_match_table = of_match_ptr(sbsa_uart_of_match),
- .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
+ .of_match_table = sbsa_uart_of_match,
+ .acpi_match_table = sbsa_uart_acpi_match,
.suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_AMBA_PL011),
},
};
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index f44f9d20a974..8918fbd4bddd 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -700,7 +700,7 @@ static void atmel_disable_ms(struct uart_port *port)
atmel_port->ms_irq_enabled = false;
- mctrl_gpio_disable_ms(atmel_port->gpios);
+ mctrl_gpio_disable_ms_no_sync(atmel_port->gpios);
if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS))
idr |= ATMEL_US_CTSIC;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index c91b9d9818cd..4470966b826c 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -441,36 +441,36 @@ static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport)
static void lpuart_stop_tx(struct uart_port *port)
{
- unsigned char temp;
+ u8 cr2;
- temp = readb(port->membase + UARTCR2);
- temp &= ~(UARTCR2_TIE | UARTCR2_TCIE);
- writeb(temp, port->membase + UARTCR2);
+ cr2 = readb(port->membase + UARTCR2);
+ cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE);
+ writeb(cr2, port->membase + UARTCR2);
}
static void lpuart32_stop_tx(struct uart_port *port)
{
- unsigned long temp;
+ u32 ctrl;
- temp = lpuart32_read(port, UARTCTRL);
- temp &= ~(UARTCTRL_TIE | UARTCTRL_TCIE);
- lpuart32_write(port, temp, UARTCTRL);
+ ctrl = lpuart32_read(port, UARTCTRL);
+ ctrl &= ~(UARTCTRL_TIE | UARTCTRL_TCIE);
+ lpuart32_write(port, ctrl, UARTCTRL);
}
static void lpuart_stop_rx(struct uart_port *port)
{
- unsigned char temp;
+ u8 cr2;
- temp = readb(port->membase + UARTCR2);
- writeb(temp & ~UARTCR2_RE, port->membase + UARTCR2);
+ cr2 = readb(port->membase + UARTCR2);
+ writeb(cr2 & ~UARTCR2_RE, port->membase + UARTCR2);
}
static void lpuart32_stop_rx(struct uart_port *port)
{
- unsigned long temp;
+ u32 ctrl;
- temp = lpuart32_read(port, UARTCTRL);
- lpuart32_write(port, temp & ~UARTCTRL_RE, UARTCTRL);
+ ctrl = lpuart32_read(port, UARTCTRL);
+ lpuart32_write(port, ctrl & ~UARTCTRL_RE, UARTCTRL);
}
static void lpuart_dma_tx(struct lpuart_port *sport)
@@ -581,7 +581,7 @@ static int lpuart_dma_tx_request(struct uart_port *port)
ret = dmaengine_slave_config(sport->dma_tx_chan, &dma_tx_sconfig);
if (ret) {
- dev_err(sport->port.dev,
+ dev_err(port->dev,
"DMA slave config failed, err = %d\n", ret);
return ret;
}
@@ -599,7 +599,7 @@ static void lpuart_flush_buffer(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
struct dma_chan *chan = sport->dma_tx_chan;
- u32 val;
+ u32 fifo;
if (sport->lpuart_dma_tx_use) {
if (sport->dma_tx_in_progress) {
@@ -611,13 +611,13 @@ static void lpuart_flush_buffer(struct uart_port *port)
}
if (lpuart_is_32(sport)) {
- val = lpuart32_read(&sport->port, UARTFIFO);
- val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
- lpuart32_write(&sport->port, val, UARTFIFO);
+ fifo = lpuart32_read(port, UARTFIFO);
+ fifo |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
+ lpuart32_write(port, fifo, UARTFIFO);
} else {
- val = readb(sport->port.membase + UARTCFIFO);
- val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
- writeb(val, sport->port.membase + UARTCFIFO);
+ fifo = readb(port->membase + UARTCFIFO);
+ fifo |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
+ writeb(fifo, port->membase + UARTCFIFO);
}
}
@@ -639,38 +639,36 @@ static void lpuart32_wait_bit_set(struct uart_port *port, unsigned int offset,
static int lpuart_poll_init(struct uart_port *port)
{
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
unsigned long flags;
- unsigned char temp;
+ u8 fifo;
- sport->port.fifosize = 0;
+ port->fifosize = 0;
- uart_port_lock_irqsave(&sport->port, &flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable Rx & Tx */
- writeb(0, sport->port.membase + UARTCR2);
+ writeb(0, port->membase + UARTCR2);
- temp = readb(sport->port.membase + UARTPFIFO);
+ fifo = readb(port->membase + UARTPFIFO);
/* Enable Rx and Tx FIFO */
- writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE,
- sport->port.membase + UARTPFIFO);
+ writeb(fifo | UARTPFIFO_RXFE | UARTPFIFO_TXFE,
+ port->membase + UARTPFIFO);
/* flush Tx and Rx FIFO */
writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
- sport->port.membase + UARTCFIFO);
+ port->membase + UARTCFIFO);
/* explicitly clear RDRF */
- if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
- readb(sport->port.membase + UARTDR);
- writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
+ if (readb(port->membase + UARTSR1) & UARTSR1_RDRF) {
+ readb(port->membase + UARTDR);
+ writeb(UARTSFIFO_RXUF, port->membase + UARTSFIFO);
}
- writeb(0, sport->port.membase + UARTTWFIFO);
- writeb(1, sport->port.membase + UARTRWFIFO);
+ writeb(0, port->membase + UARTTWFIFO);
+ writeb(1, port->membase + UARTRWFIFO);
/* Enable Rx and Tx */
- writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
- uart_port_unlock_irqrestore(&sport->port, flags);
+ writeb(UARTCR2_RE | UARTCR2_TE, port->membase + UARTCR2);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -693,33 +691,32 @@ static int lpuart_poll_get_char(struct uart_port *port)
static int lpuart32_poll_init(struct uart_port *port)
{
unsigned long flags;
- struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- u32 temp;
+ u32 fifo;
- sport->port.fifosize = 0;
+ port->fifosize = 0;
- uart_port_lock_irqsave(&sport->port, &flags);
+ uart_port_lock_irqsave(port, &flags);
/* Disable Rx & Tx */
- lpuart32_write(&sport->port, 0, UARTCTRL);
+ lpuart32_write(port, 0, UARTCTRL);
- temp = lpuart32_read(&sport->port, UARTFIFO);
+ fifo = lpuart32_read(port, UARTFIFO);
/* Enable Rx and Tx FIFO */
- lpuart32_write(&sport->port, temp | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
+ lpuart32_write(port, fifo | UARTFIFO_RXFE | UARTFIFO_TXFE, UARTFIFO);
/* flush Tx and Rx FIFO */
- lpuart32_write(&sport->port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
+ lpuart32_write(port, UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH, UARTFIFO);
/* explicitly clear RDRF */
- if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
- lpuart32_read(&sport->port, UARTDATA);
- lpuart32_write(&sport->port, UARTFIFO_RXUF, UARTFIFO);
+ if (lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF) {
+ lpuart32_read(port, UARTDATA);
+ lpuart32_write(port, UARTFIFO_RXUF, UARTFIFO);
}
/* Enable Rx and Tx */
- lpuart32_write(&sport->port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
- uart_port_unlock_irqrestore(&sport->port, flags);
+ lpuart32_write(port, UARTCTRL_RE | UARTCTRL_TE, UARTCTRL);
+ uart_port_unlock_irqrestore(port, flags);
return 0;
}
@@ -752,7 +749,7 @@ static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
static inline void lpuart32_transmit_buffer(struct lpuart_port *sport)
{
struct tty_port *tport = &sport->port.state->port;
- unsigned long txcnt;
+ u32 txcnt;
unsigned char c;
if (sport->port.x_char) {
@@ -789,10 +786,10 @@ static void lpuart_start_tx(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- unsigned char temp;
+ u8 cr2;
- temp = readb(port->membase + UARTCR2);
- writeb(temp | UARTCR2_TIE, port->membase + UARTCR2);
+ cr2 = readb(port->membase + UARTCR2);
+ writeb(cr2 | UARTCR2_TIE, port->membase + UARTCR2);
if (sport->lpuart_dma_tx_use) {
if (!lpuart_stopped_or_empty(port))
@@ -806,14 +803,14 @@ static void lpuart_start_tx(struct uart_port *port)
static void lpuart32_start_tx(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- unsigned long temp;
+ u32 ctrl;
if (sport->lpuart_dma_tx_use) {
if (!lpuart_stopped_or_empty(port))
lpuart_dma_tx(sport);
} else {
- temp = lpuart32_read(port, UARTCTRL);
- lpuart32_write(port, temp | UARTCTRL_TIE, UARTCTRL);
+ ctrl = lpuart32_read(port, UARTCTRL);
+ lpuart32_write(port, ctrl | UARTCTRL_TIE, UARTCTRL);
if (lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE)
lpuart32_transmit_buffer(sport);
@@ -839,8 +836,8 @@ static unsigned int lpuart_tx_empty(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- unsigned char sr1 = readb(port->membase + UARTSR1);
- unsigned char sfifo = readb(port->membase + UARTSFIFO);
+ u8 sr1 = readb(port->membase + UARTSR1);
+ u8 sfifo = readb(port->membase + UARTSFIFO);
if (sport->dma_tx_in_progress)
return 0;
@@ -855,9 +852,9 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port,
struct lpuart_port, port);
- unsigned long stat = lpuart32_read(port, UARTSTAT);
- unsigned long sfifo = lpuart32_read(port, UARTFIFO);
- unsigned long ctrl = lpuart32_read(port, UARTCTRL);
+ u32 stat = lpuart32_read(port, UARTSTAT);
+ u32 sfifo = lpuart32_read(port, UARTFIFO);
+ u32 ctrl = lpuart32_read(port, UARTCTRL);
if (sport->dma_tx_in_progress)
return 0;
@@ -884,7 +881,7 @@ static void lpuart_rxint(struct lpuart_port *sport)
{
unsigned int flg, ignored = 0, overrun = 0;
struct tty_port *port = &sport->port.state->port;
- unsigned char rx, sr;
+ u8 rx, sr;
uart_port_lock(&sport->port);
@@ -961,7 +958,7 @@ static void lpuart32_rxint(struct lpuart_port *sport)
{
unsigned int flg, ignored = 0;
struct tty_port *port = &sport->port.state->port;
- unsigned long rx, sr;
+ u32 rx, sr;
bool is_break;
uart_port_lock(&sport->port);
@@ -1039,7 +1036,7 @@ out:
static irqreturn_t lpuart_int(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
- unsigned char sts;
+ u8 sts;
sts = readb(sport->port.membase + UARTSR1);
@@ -1113,7 +1110,7 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
int count, copied;
if (lpuart_is_32(sport)) {
- unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
+ u32 sr = lpuart32_read(&sport->port, UARTSTAT);
if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
/* Clear the error flags */
@@ -1125,10 +1122,10 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
sport->port.icount.frame++;
}
} else {
- unsigned char sr = readb(sport->port.membase + UARTSR1);
+ u8 sr = readb(sport->port.membase + UARTSR1);
if (sr & (UARTSR1_PE | UARTSR1_FE)) {
- unsigned char cr2;
+ u8 cr2;
/* Disable receiver during this operation... */
cr2 = readb(sport->port.membase + UARTCR2);
@@ -1279,7 +1276,7 @@ static void lpuart32_dma_idleint(struct lpuart_port *sport)
static irqreturn_t lpuart32_int(int irq, void *dev_id)
{
struct lpuart_port *sport = dev_id;
- unsigned long sts, rxcount;
+ u32 sts, rxcount;
sts = lpuart32_read(&sport->port, UARTSTAT);
rxcount = lpuart32_read(&sport->port, UARTWATER);
@@ -1411,12 +1408,12 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
dma_async_issue_pending(chan);
if (lpuart_is_32(sport)) {
- unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
+ u32 baud = lpuart32_read(&sport->port, UARTBAUD);
- lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD);
+ lpuart32_write(&sport->port, baud | UARTBAUD_RDMAE, UARTBAUD);
if (sport->dma_idle_int) {
- unsigned long ctrl = lpuart32_read(&sport->port, UARTCTRL);
+ u32 ctrl = lpuart32_read(&sport->port, UARTCTRL);
lpuart32_write(&sport->port, ctrl | UARTCTRL_ILIE, UARTCTRL);
}
@@ -1449,12 +1446,9 @@ static void lpuart_dma_rx_free(struct uart_port *port)
static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
-
- u8 modem = readb(sport->port.membase + UARTMODEM) &
+ u8 modem = readb(port->membase + UARTMODEM) &
~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE);
- writeb(modem, sport->port.membase + UARTMODEM);
+ writeb(modem, port->membase + UARTMODEM);
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
@@ -1472,19 +1466,29 @@ static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
modem &= ~UARTMODEM_TXRTSPOL;
}
- writeb(modem, sport->port.membase + UARTMODEM);
+ writeb(modem, port->membase + UARTMODEM);
return 0;
}
static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485)
{
- struct lpuart_port *sport = container_of(port,
- struct lpuart_port, port);
-
- unsigned long modem = lpuart32_read(&sport->port, UARTMODIR)
+ u32 modem = lpuart32_read(port, UARTMODIR)
& ~(UARTMODIR_TXRTSPOL | UARTMODIR_TXRTSE);
- lpuart32_write(&sport->port, modem, UARTMODIR);
+ u32 ctrl;
+
+ /* TXRTSE and TXRTSPOL can only be changed while the transmitter is disabled. */
+ ctrl = lpuart32_read(port, UARTCTRL);
+ if (ctrl & UARTCTRL_TE) {
+ /* wait for the transmit engine to complete */
+ lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TC);
+ lpuart32_write(port, ctrl & ~UARTCTRL_TE, UARTCTRL);
+
+ while (lpuart32_read(port, UARTCTRL) & UARTCTRL_TE)
+ cpu_relax();
+ }
+
+ lpuart32_write(port, modem, UARTMODIR);
if (rs485->flags & SER_RS485_ENABLED) {
/* Enable auto RS-485 RTS mode */
@@ -1502,17 +1506,21 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio
modem &= ~UARTMODIR_TXRTSPOL;
}
- lpuart32_write(&sport->port, modem, UARTMODIR);
+ lpuart32_write(port, modem, UARTMODIR);
+
+ if (ctrl & UARTCTRL_TE)
+ lpuart32_write(port, ctrl, UARTCTRL);
+
return 0;
}
static unsigned int lpuart_get_mctrl(struct uart_port *port)
{
unsigned int mctrl = 0;
- u8 reg;
+ u8 cr1;
- reg = readb(port->membase + UARTCR1);
- if (reg & UARTCR1_LOOPS)
+ cr1 = readb(port->membase + UARTCR1);
+ if (cr1 & UARTCR1_LOOPS)
mctrl |= TIOCM_LOOP;
return mctrl;
@@ -1521,10 +1529,10 @@ static unsigned int lpuart_get_mctrl(struct uart_port *port)
static unsigned int lpuart32_get_mctrl(struct uart_port *port)
{
unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
- u32 reg;
+ u32 ctrl;
- reg = lpuart32_read(port, UARTCTRL);
- if (reg & UARTCTRL_LOOPS)
+ ctrl = lpuart32_read(port, UARTCTRL);
+ if (ctrl & UARTCTRL_LOOPS)
mctrl |= TIOCM_LOOP;
return mctrl;
@@ -1532,49 +1540,49 @@ static unsigned int lpuart32_get_mctrl(struct uart_port *port)
static void lpuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- u8 reg;
+ u8 cr1;
- reg = readb(port->membase + UARTCR1);
+ cr1 = readb(port->membase + UARTCR1);
/* for internal loopback we need LOOPS=1 and RSRC=0 */
- reg &= ~(UARTCR1_LOOPS | UARTCR1_RSRC);
+ cr1 &= ~(UARTCR1_LOOPS | UARTCR1_RSRC);
if (mctrl & TIOCM_LOOP)
- reg |= UARTCR1_LOOPS;
+ cr1 |= UARTCR1_LOOPS;
- writeb(reg, port->membase + UARTCR1);
+ writeb(cr1, port->membase + UARTCR1);
}
static void lpuart32_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- u32 reg;
+ u32 ctrl;
- reg = lpuart32_read(port, UARTCTRL);
+ ctrl = lpuart32_read(port, UARTCTRL);
/* for internal loopback we need LOOPS=1 and RSRC=0 */
- reg &= ~(UARTCTRL_LOOPS | UARTCTRL_RSRC);
+ ctrl &= ~(UARTCTRL_LOOPS | UARTCTRL_RSRC);
if (mctrl & TIOCM_LOOP)
- reg |= UARTCTRL_LOOPS;
+ ctrl |= UARTCTRL_LOOPS;
- lpuart32_write(port, reg, UARTCTRL);
+ lpuart32_write(port, ctrl, UARTCTRL);
}
static void lpuart_break_ctl(struct uart_port *port, int break_state)
{
- unsigned char temp;
+ u8 cr2;
- temp = readb(port->membase + UARTCR2) & ~UARTCR2_SBK;
+ cr2 = readb(port->membase + UARTCR2) & ~UARTCR2_SBK;
if (break_state != 0)
- temp |= UARTCR2_SBK;
+ cr2 |= UARTCR2_SBK;
- writeb(temp, port->membase + UARTCR2);
+ writeb(cr2, port->membase + UARTCR2);
}
static void lpuart32_break_ctl(struct uart_port *port, int break_state)
{
- unsigned long temp;
+ u32 ctrl;
- temp = lpuart32_read(port, UARTCTRL);
+ ctrl = lpuart32_read(port, UARTCTRL);
/*
* LPUART IP now has two known bugs, one is CTS has higher priority than the
@@ -1591,23 +1599,22 @@ static void lpuart32_break_ctl(struct uart_port *port, int break_state)
* Disable the transmitter to prevent any data from being sent out
* during break, then invert the TX line to send break.
*/
- temp &= ~UARTCTRL_TE;
- lpuart32_write(port, temp, UARTCTRL);
- temp |= UARTCTRL_TXINV;
- lpuart32_write(port, temp, UARTCTRL);
+ ctrl &= ~UARTCTRL_TE;
+ lpuart32_write(port, ctrl, UARTCTRL);
+ ctrl |= UARTCTRL_TXINV;
+ lpuart32_write(port, ctrl, UARTCTRL);
} else {
/* Disable the TXINV to turn off break and re-enable transmitter. */
- temp &= ~UARTCTRL_TXINV;
- lpuart32_write(port, temp, UARTCTRL);
- temp |= UARTCTRL_TE;
- lpuart32_write(port, temp, UARTCTRL);
+ ctrl &= ~UARTCTRL_TXINV;
+ lpuart32_write(port, ctrl, UARTCTRL);
+ ctrl |= UARTCTRL_TE;
+ lpuart32_write(port, ctrl, UARTCTRL);
}
}
static void lpuart_setup_watermark(struct lpuart_port *sport)
{
- unsigned char val, cr2;
- unsigned char cr2_saved;
+ u8 fifo, cr2, cr2_saved;
cr2 = readb(sport->port.membase + UARTCR2);
cr2_saved = cr2;
@@ -1615,8 +1622,8 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
UARTCR2_RIE | UARTCR2_RE);
writeb(cr2, sport->port.membase + UARTCR2);
- val = readb(sport->port.membase + UARTPFIFO);
- writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
+ fifo = readb(sport->port.membase + UARTPFIFO);
+ writeb(fifo | UARTPFIFO_TXFE | UARTPFIFO_RXFE,
sport->port.membase + UARTPFIFO);
/* flush Tx and Rx FIFO */
@@ -1640,7 +1647,7 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
static void lpuart_setup_watermark_enable(struct lpuart_port *sport)
{
- unsigned char cr2;
+ u8 cr2;
lpuart_setup_watermark(sport);
@@ -1651,8 +1658,7 @@ static void lpuart_setup_watermark_enable(struct lpuart_port *sport)
static void lpuart32_setup_watermark(struct lpuart_port *sport)
{
- unsigned long val, ctrl;
- unsigned long ctrl_saved;
+ u32 val, ctrl, ctrl_saved;
ctrl = lpuart32_read(&sport->port, UARTCTRL);
ctrl_saved = ctrl;
@@ -1687,14 +1693,14 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
static void lpuart32_setup_watermark_enable(struct lpuart_port *sport)
{
- u32 temp;
+ u32 ctrl;
lpuart32_setup_watermark(sport);
- temp = lpuart32_read(&sport->port, UARTCTRL);
- temp |= UARTCTRL_RE | UARTCTRL_TE;
- temp |= FIELD_PREP(UARTCTRL_IDLECFG, 0x7);
- lpuart32_write(&sport->port, temp, UARTCTRL);
+ ctrl = lpuart32_read(&sport->port, UARTCTRL);
+ ctrl |= UARTCTRL_RE | UARTCTRL_TE;
+ ctrl |= FIELD_PREP(UARTCTRL_IDLECFG, 0x7);
+ lpuart32_write(&sport->port, ctrl, UARTCTRL);
}
static void rx_dma_timer_init(struct lpuart_port *sport)
@@ -1761,7 +1767,7 @@ err:
static void lpuart_rx_dma_startup(struct lpuart_port *sport)
{
int ret;
- unsigned char cr3;
+ u8 cr3;
if (uart_console(&sport->port))
goto err;
@@ -1811,16 +1817,16 @@ static void lpuart_hw_setup(struct lpuart_port *sport)
static int lpuart_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- unsigned char temp;
+ u8 fifo;
/* determine FIFO size and enable FIFO mode */
- temp = readb(sport->port.membase + UARTPFIFO);
+ fifo = readb(port->membase + UARTPFIFO);
- sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_TXSIZE_OFF) &
+ sport->txfifo_size = UARTFIFO_DEPTH((fifo >> UARTPFIFO_TXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK);
- sport->port.fifosize = sport->txfifo_size;
+ port->fifosize = sport->txfifo_size;
- sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTPFIFO_RXSIZE_OFF) &
+ sport->rxfifo_size = UARTFIFO_DEPTH((fifo >> UARTPFIFO_RXSIZE_OFF) &
UARTPFIFO_FIFOSIZE_MASK);
lpuart_request_dma(sport);
@@ -1831,24 +1837,24 @@ static int lpuart_startup(struct uart_port *port)
static void lpuart32_hw_disable(struct lpuart_port *sport)
{
- unsigned long temp;
+ u32 ctrl;
- temp = lpuart32_read(&sport->port, UARTCTRL);
- temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE |
+ ctrl = lpuart32_read(&sport->port, UARTCTRL);
+ ctrl &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE |
UARTCTRL_TIE | UARTCTRL_TE);
- lpuart32_write(&sport->port, temp, UARTCTRL);
+ lpuart32_write(&sport->port, ctrl, UARTCTRL);
}
static void lpuart32_configure(struct lpuart_port *sport)
{
- unsigned long temp;
+ u32 ctrl;
- temp = lpuart32_read(&sport->port, UARTCTRL);
+ ctrl = lpuart32_read(&sport->port, UARTCTRL);
if (!sport->lpuart_dma_rx_use)
- temp |= UARTCTRL_RIE | UARTCTRL_ILIE;
+ ctrl |= UARTCTRL_RIE | UARTCTRL_ILIE;
if (!sport->lpuart_dma_tx_use)
- temp |= UARTCTRL_TIE;
- lpuart32_write(&sport->port, temp, UARTCTRL);
+ ctrl |= UARTCTRL_TIE;
+ lpuart32_write(&sport->port, ctrl, UARTCTRL);
}
static void lpuart32_hw_setup(struct lpuart_port *sport)
@@ -1871,16 +1877,16 @@ static void lpuart32_hw_setup(struct lpuart_port *sport)
static int lpuart32_startup(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- unsigned long temp;
+ u32 fifo;
/* determine FIFO size */
- temp = lpuart32_read(&sport->port, UARTFIFO);
+ fifo = lpuart32_read(port, UARTFIFO);
- sport->txfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_TXSIZE_OFF) &
+ sport->txfifo_size = UARTFIFO_DEPTH((fifo >> UARTFIFO_TXSIZE_OFF) &
UARTFIFO_FIFOSIZE_MASK);
- sport->port.fifosize = sport->txfifo_size;
+ port->fifosize = sport->txfifo_size;
- sport->rxfifo_size = UARTFIFO_DEPTH((temp >> UARTFIFO_RXSIZE_OFF) &
+ sport->rxfifo_size = UARTFIFO_DEPTH((fifo >> UARTFIFO_RXSIZE_OFF) &
UARTFIFO_FIFOSIZE_MASK);
/*
@@ -1891,7 +1897,7 @@ static int lpuart32_startup(struct uart_port *port)
if (is_layerscape_lpuart(sport)) {
sport->rxfifo_size = 16;
sport->txfifo_size = 16;
- sport->port.fifosize = sport->txfifo_size;
+ port->fifosize = sport->txfifo_size;
}
lpuart_request_dma(sport);
@@ -1925,16 +1931,16 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
static void lpuart_shutdown(struct uart_port *port)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
- unsigned char temp;
+ u8 cr2;
unsigned long flags;
uart_port_lock_irqsave(port, &flags);
/* disable Rx/Tx and interrupts */
- temp = readb(port->membase + UARTCR2);
- temp &= ~(UARTCR2_TE | UARTCR2_RE |
+ cr2 = readb(port->membase + UARTCR2);
+ cr2 &= ~(UARTCR2_TE | UARTCR2_RE |
UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_RIE);
- writeb(temp, port->membase + UARTCR2);
+ writeb(cr2, port->membase + UARTCR2);
uart_port_unlock_irqrestore(port, flags);
@@ -1945,14 +1951,14 @@ static void lpuart32_shutdown(struct uart_port *port)
{
struct lpuart_port *sport =
container_of(port, struct lpuart_port, port);
- unsigned long temp;
+ u32 temp;
unsigned long flags;
uart_port_lock_irqsave(port, &flags);
/* clear status */
- temp = lpuart32_read(&sport->port, UARTSTAT);
- lpuart32_write(&sport->port, temp, UARTSTAT);
+ temp = lpuart32_read(port, UARTSTAT);
+ lpuart32_write(port, temp, UARTSTAT);
/* disable Rx/Tx DMA */
temp = lpuart32_read(port, UARTBAUD);
@@ -1981,17 +1987,17 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
- unsigned char cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem;
+ u8 cr1, old_cr1, old_cr2, cr3, cr4, bdh, modem;
unsigned int baud;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
unsigned int sbr, brfa;
- cr1 = old_cr1 = readb(sport->port.membase + UARTCR1);
- old_cr2 = readb(sport->port.membase + UARTCR2);
- cr3 = readb(sport->port.membase + UARTCR3);
- cr4 = readb(sport->port.membase + UARTCR4);
- bdh = readb(sport->port.membase + UARTBDH);
- modem = readb(sport->port.membase + UARTMODEM);
+ cr1 = old_cr1 = readb(port->membase + UARTCR1);
+ old_cr2 = readb(port->membase + UARTCR2);
+ cr3 = readb(port->membase + UARTCR3);
+ cr4 = readb(port->membase + UARTCR4);
+ bdh = readb(port->membase + UARTBDH);
+ modem = readb(port->membase + UARTMODEM);
/*
* only support CS8 and CS7, and for CS7 must enable PE.
* supported mode:
@@ -2023,7 +2029,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
* When auto RS-485 RTS mode is enabled,
* hardware flow control need to be disabled.
*/
- if (sport->port.rs485.flags & SER_RS485_ENABLED)
+ if (port->rs485.flags & SER_RS485_ENABLED)
termios->c_cflag &= ~CRTSCTS;
if (termios->c_cflag & CRTSCTS)
@@ -2064,59 +2070,59 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
* Need to update the Ring buffer length according to the selected
* baud rate and restart Rx DMA path.
*
- * Since timer function acqures sport->port.lock, need to stop before
+ * Since timer function acquires port->lock, need to stop before
* acquring same lock because otherwise del_timer_sync() can deadlock.
*/
if (old && sport->lpuart_dma_rx_use)
- lpuart_dma_rx_free(&sport->port);
+ lpuart_dma_rx_free(port);
- uart_port_lock_irqsave(&sport->port, &flags);
+ uart_port_lock_irqsave(port, &flags);
- sport->port.read_status_mask = 0;
+ port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
- sport->port.read_status_mask |= UARTSR1_FE | UARTSR1_PE;
+ port->read_status_mask |= UARTSR1_FE | UARTSR1_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- sport->port.read_status_mask |= UARTSR1_FE;
+ port->read_status_mask |= UARTSR1_FE;
/* characters to ignore */
- sport->port.ignore_status_mask = 0;
+ port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
- sport->port.ignore_status_mask |= UARTSR1_PE;
+ port->ignore_status_mask |= UARTSR1_PE;
if (termios->c_iflag & IGNBRK) {
- sport->port.ignore_status_mask |= UARTSR1_FE;
+ port->ignore_status_mask |= UARTSR1_FE;
/*
* if we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
- sport->port.ignore_status_mask |= UARTSR1_OR;
+ port->ignore_status_mask |= UARTSR1_OR;
}
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
/* wait transmit engin complete */
- lpuart_wait_bit_set(&sport->port, UARTSR1, UARTSR1_TC);
+ lpuart_wait_bit_set(port, UARTSR1, UARTSR1_TC);
/* disable transmit and receive */
writeb(old_cr2 & ~(UARTCR2_TE | UARTCR2_RE),
- sport->port.membase + UARTCR2);
+ port->membase + UARTCR2);
- sbr = sport->port.uartclk / (16 * baud);
- brfa = ((sport->port.uartclk - (16 * sbr * baud)) * 2) / baud;
+ sbr = port->uartclk / (16 * baud);
+ brfa = ((port->uartclk - (16 * sbr * baud)) * 2) / baud;
bdh &= ~UARTBDH_SBR_MASK;
bdh |= (sbr >> 8) & 0x1F;
cr4 &= ~UARTCR4_BRFA_MASK;
brfa &= UARTCR4_BRFA_MASK;
- writeb(cr4 | brfa, sport->port.membase + UARTCR4);
- writeb(bdh, sport->port.membase + UARTBDH);
- writeb(sbr & 0xFF, sport->port.membase + UARTBDL);
- writeb(cr3, sport->port.membase + UARTCR3);
- writeb(cr1, sport->port.membase + UARTCR1);
- writeb(modem, sport->port.membase + UARTMODEM);
+ writeb(cr4 | brfa, port->membase + UARTCR4);
+ writeb(bdh, port->membase + UARTBDH);
+ writeb(sbr & 0xFF, port->membase + UARTBDL);
+ writeb(cr3, port->membase + UARTCR3);
+ writeb(cr1, port->membase + UARTCR1);
+ writeb(modem, port->membase + UARTMODEM);
/* restore control register */
- writeb(old_cr2, sport->port.membase + UARTCR2);
+ writeb(old_cr2, port->membase + UARTCR2);
if (old && sport->lpuart_dma_rx_use) {
if (!lpuart_start_rx_dma(sport))
@@ -2125,14 +2131,14 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
sport->lpuart_dma_rx_use = false;
}
- uart_port_unlock_irqrestore(&sport->port, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void __lpuart32_serial_setbrg(struct uart_port *port,
unsigned int baudrate, bool use_rx_dma,
bool use_tx_dma)
{
- u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, tmp;
+ u32 sbr, osr, baud_diff, tmp_osr, tmp_sbr, tmp_diff, baud;
u32 clk = port->uartclk;
/*
@@ -2161,9 +2167,9 @@ static void __lpuart32_serial_setbrg(struct uart_port *port,
tmp_diff = clk / (tmp_osr * tmp_sbr) - baudrate;
/* select best values between sbr and sbr+1 */
- tmp = clk / (tmp_osr * (tmp_sbr + 1));
- if (tmp_diff > (baudrate - tmp)) {
- tmp_diff = baudrate - tmp;
+ baud = clk / (tmp_osr * (tmp_sbr + 1));
+ if (tmp_diff > (baudrate - baud)) {
+ tmp_diff = baudrate - baud;
tmp_sbr++;
}
@@ -2185,23 +2191,23 @@ static void __lpuart32_serial_setbrg(struct uart_port *port,
dev_warn(port->dev,
"unacceptable baud rate difference of more than 3%%\n");
- tmp = lpuart32_read(port, UARTBAUD);
+ baud = lpuart32_read(port, UARTBAUD);
if ((osr > 3) && (osr < 8))
- tmp |= UARTBAUD_BOTHEDGE;
+ baud |= UARTBAUD_BOTHEDGE;
- tmp &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT);
- tmp |= ((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT;
+ baud &= ~(UARTBAUD_OSR_MASK << UARTBAUD_OSR_SHIFT);
+ baud |= ((osr-1) & UARTBAUD_OSR_MASK) << UARTBAUD_OSR_SHIFT;
- tmp &= ~UARTBAUD_SBR_MASK;
- tmp |= sbr & UARTBAUD_SBR_MASK;
+ baud &= ~UARTBAUD_SBR_MASK;
+ baud |= sbr & UARTBAUD_SBR_MASK;
if (!use_rx_dma)
- tmp &= ~UARTBAUD_RDMAE;
+ baud &= ~UARTBAUD_RDMAE;
if (!use_tx_dma)
- tmp &= ~UARTBAUD_TDMAE;
+ baud &= ~UARTBAUD_TDMAE;
- lpuart32_write(port, tmp, UARTBAUD);
+ lpuart32_write(port, baud, UARTBAUD);
}
static void lpuart32_serial_setbrg(struct lpuart_port *sport,
@@ -2219,13 +2225,13 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
- unsigned long ctrl, old_ctrl, bd, modem;
+ u32 ctrl, old_ctrl, bd, modem;
unsigned int baud;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
- ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
- bd = lpuart32_read(&sport->port, UARTBAUD);
- modem = lpuart32_read(&sport->port, UARTMODIR);
+ ctrl = old_ctrl = lpuart32_read(port, UARTCTRL);
+ bd = lpuart32_read(port, UARTBAUD);
+ modem = lpuart32_read(port, UARTMODIR);
sport->is_cs7 = false;
/*
* only support CS8 and CS7
@@ -2259,7 +2265,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
* When auto RS-485 RTS mode is enabled,
* hardware flow control need to be disabled.
*/
- if (sport->port.rs485.flags & SER_RS485_ENABLED)
+ if (port->rs485.flags & SER_RS485_ENABLED)
termios->c_cflag &= ~CRTSCTS;
if (termios->c_cflag & CRTSCTS)
@@ -2309,59 +2315,61 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
* Need to update the Ring buffer length according to the selected
* baud rate and restart Rx DMA path.
*
- * Since timer function acqures sport->port.lock, need to stop before
+ * Since timer function acquires port->lock, need to stop before
* acquring same lock because otherwise del_timer_sync() can deadlock.
*/
if (old && sport->lpuart_dma_rx_use)
- lpuart_dma_rx_free(&sport->port);
+ lpuart_dma_rx_free(port);
- uart_port_lock_irqsave(&sport->port, &flags);
+ uart_port_lock_irqsave(port, &flags);
- sport->port.read_status_mask = 0;
+ port->read_status_mask = 0;
if (termios->c_iflag & INPCK)
- sport->port.read_status_mask |= UARTSTAT_FE | UARTSTAT_PE;
+ port->read_status_mask |= UARTSTAT_FE | UARTSTAT_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- sport->port.read_status_mask |= UARTSTAT_FE;
+ port->read_status_mask |= UARTSTAT_FE;
/* characters to ignore */
- sport->port.ignore_status_mask = 0;
+ port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
- sport->port.ignore_status_mask |= UARTSTAT_PE;
+ port->ignore_status_mask |= UARTSTAT_PE;
if (termios->c_iflag & IGNBRK) {
- sport->port.ignore_status_mask |= UARTSTAT_FE;
+ port->ignore_status_mask |= UARTSTAT_FE;
/*
* if we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
- sport->port.ignore_status_mask |= UARTSTAT_OR;
+ port->ignore_status_mask |= UARTSTAT_OR;
}
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
/*
+ * disable CTS to ensure the transmit engine is not blocked by the flow
+ * control when there is dirty data in TX FIFO
+ */
+ lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
+
+ /*
* LPUART Transmission Complete Flag may never be set while queuing a break
* character, so skip waiting for transmission complete when UARTCTRL_SBK is
* asserted.
*/
- if (!(old_ctrl & UARTCTRL_SBK)) {
- lpuart32_write(&sport->port, 0, UARTMODIR);
- lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
- }
+ if (!(old_ctrl & UARTCTRL_SBK))
+ lpuart32_wait_bit_set(port, UARTSTAT, UARTSTAT_TC);
/* disable transmit and receive */
- lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
+ lpuart32_write(port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
UARTCTRL);
- lpuart32_write(&sport->port, bd, UARTBAUD);
+ lpuart32_write(port, bd, UARTBAUD);
lpuart32_serial_setbrg(sport, baud);
- /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */
- lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
/* restore control register */
- lpuart32_write(&sport->port, ctrl, UARTCTRL);
+ lpuart32_write(port, ctrl, UARTCTRL);
/* re-enable the CTS if needed */
- lpuart32_write(&sport->port, modem, UARTMODIR);
+ lpuart32_write(port, modem, UARTMODIR);
if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
sport->is_cs7 = true;
@@ -2373,7 +2381,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
sport->lpuart_dma_rx_use = false;
}
- uart_port_unlock_irqrestore(&sport->port, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *lpuart_type(struct uart_port *port)
@@ -2486,7 +2494,7 @@ static void
lpuart_console_write(struct console *co, const char *s, unsigned int count)
{
struct lpuart_port *sport = lpuart_ports[co->index];
- unsigned char old_cr2, cr2;
+ u8 old_cr2, cr2;
unsigned long flags;
int locked = 1;
@@ -2516,7 +2524,7 @@ static void
lpuart32_console_write(struct console *co, const char *s, unsigned int count)
{
struct lpuart_port *sport = lpuart_ports[co->index];
- unsigned long old_cr, cr;
+ u32 old_cr, cr;
unsigned long flags;
int locked = 1;
@@ -2550,7 +2558,7 @@ static void __init
lpuart_console_get_options(struct lpuart_port *sport, int *baud,
int *parity, int *bits)
{
- unsigned char cr, bdh, bdl, brfa;
+ u8 cr, bdh, bdl, brfa;
unsigned int sbr, uartclk, baud_raw;
cr = readb(sport->port.membase + UARTCR2);
@@ -2599,7 +2607,7 @@ static void __init
lpuart32_console_get_options(struct lpuart_port *sport, int *baud,
int *parity, int *bits)
{
- unsigned long cr, bd;
+ u32 cr, bd;
unsigned int sbr, uartclk, baud_raw;
cr = lpuart32_read(&sport->port, UARTCTRL);
@@ -2805,13 +2813,13 @@ static int lpuart_global_reset(struct lpuart_port *sport)
{
struct uart_port *port = &sport->port;
void __iomem *global_addr;
- unsigned long ctrl, bd;
+ u32 ctrl, bd;
unsigned int val = 0;
int ret;
ret = clk_prepare_enable(sport->ipg_clk);
if (ret) {
- dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret);
+ dev_err(port->dev, "failed to enable uart ipg clk: %d\n", ret);
return ret;
}
@@ -2822,10 +2830,10 @@ static int lpuart_global_reset(struct lpuart_port *sport)
*/
ctrl = lpuart32_read(port, UARTCTRL);
if (ctrl & UARTCTRL_TE) {
- bd = lpuart32_read(&sport->port, UARTBAUD);
+ bd = lpuart32_read(port, UARTBAUD);
if (read_poll_timeout(lpuart32_tx_empty, val, val, 1, 100000, false,
port)) {
- dev_warn(sport->port.dev,
+ dev_warn(port->dev,
"timeout waiting for transmit engine to complete\n");
clk_disable_unprepare(sport->ipg_clk);
return 0;
@@ -2954,7 +2962,7 @@ static int lpuart_probe(struct platform_device *pdev)
goto failed_attach_port;
ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
- DRIVER_NAME, sport);
+ dev_name(&pdev->dev), sport);
if (ret)
goto failed_irq_request;
@@ -3011,7 +3019,7 @@ static int lpuart_runtime_resume(struct device *dev)
static void serial_lpuart_enable_wakeup(struct lpuart_port *sport, bool on)
{
- unsigned int val, baud;
+ u32 val, baud;
if (lpuart_is_32(sport)) {
val = lpuart32_read(&sport->port, UARTCTRL);
@@ -3076,7 +3084,7 @@ static int lpuart_suspend_noirq(struct device *dev)
static int lpuart_resume_noirq(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
- unsigned int val;
+ u32 stat;
pinctrl_pm_select_default_state(dev);
@@ -3085,8 +3093,8 @@ static int lpuart_resume_noirq(struct device *dev)
/* clear the wakeup flags */
if (lpuart_is_32(sport)) {
- val = lpuart32_read(&sport->port, UARTSTAT);
- lpuart32_write(&sport->port, val, UARTSTAT);
+ stat = lpuart32_read(&sport->port, UARTSTAT);
+ lpuart32_write(&sport->port, stat, UARTSTAT);
}
}
@@ -3096,7 +3104,8 @@ static int lpuart_resume_noirq(struct device *dev)
static int lpuart_suspend(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
- unsigned long temp, flags;
+ u32 temp;
+ unsigned long flags;
uart_suspend_port(&lpuart_reg, &sport->port);
@@ -3176,7 +3185,7 @@ static void lpuart_console_fixup(struct lpuart_port *sport)
* in VLLS mode, or restore console setting here.
*/
if (is_imx7ulp_lpuart(sport) && lpuart_uport_is_active(sport) &&
- console_suspend_enabled && uart_console(&sport->port)) {
+ console_suspend_enabled && uart_console(uport)) {
mutex_lock(&port->mutex);
memset(&termios, 0, sizeof(struct ktermios));
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 29e42831df39..7fb995a8490e 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -1764,11 +1764,10 @@ static int icom_probe(struct pci_dev *dev,
goto probe_exit1;
}
- /* save off irq and request irq line */
- retval = request_irq(dev->irq, icom_interrupt, IRQF_SHARED, ICOM_DRIVER_NAME, (void *)icom_adapter);
- if (retval) {
- goto probe_exit2;
- }
+ /* save off irq and request irq line */
+ retval = request_irq(dev->irq, icom_interrupt, IRQF_SHARED, ICOM_DRIVER_NAME, icom_adapter);
+ if (retval)
+ goto probe_exit2;
retval = icom_load_ports(icom_adapter);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9a1afe409b98..19c819705bf9 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1608,7 +1608,7 @@ static void imx_uart_shutdown(struct uart_port *port)
imx_uart_dma_exit(sport);
}
- mctrl_gpio_disable_ms(sport->gpios);
+ mctrl_gpio_disable_ms_sync(sport->gpios);
uart_port_lock_irqsave(&sport->port, &flags);
ucr2 = imx_uart_readl(sport, UCR2);
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
deleted file mode 100644
index 2833708e369f..000000000000
--- a/drivers/tty/serial/kgdb_nmi.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * KGDB NMI serial console
- *
- * Copyright 2010 Google, Inc.
- * Arve Hjønnevåg <arve@android.com>
- * Colin Cross <ccross@android.com>
- * Copyright 2012 Linaro Ltd.
- * Anton Vorontsov <anton.vorontsov@linaro.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/atomic.h>
-#include <linux/console.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-#include <linux/tick.h>
-#include <linux/kfifo.h>
-#include <linux/kgdb.h>
-#include <linux/kdb.h>
-
-static atomic_t kgdb_nmi_num_readers = ATOMIC_INIT(0);
-
-static int kgdb_nmi_console_setup(struct console *co, char *options)
-{
- arch_kgdb_ops.enable_nmi(1);
-
- /* The NMI console uses the dbg_io_ops to issue console messages. To
- * avoid duplicate messages during kdb sessions we must inform kdb's
- * I/O utilities that messages sent to the console will automatically
- * be displayed on the dbg_io.
- */
- dbg_io_ops->cons = co;
-
- return 0;
-}
-
-static void kgdb_nmi_console_write(struct console *co, const char *s, uint c)
-{
- int i;
-
- for (i = 0; i < c; i++)
- dbg_io_ops->write_char(s[i]);
-}
-
-static struct tty_driver *kgdb_nmi_tty_driver;
-
-static struct tty_driver *kgdb_nmi_console_device(struct console *co, int *idx)
-{
- *idx = co->index;
- return kgdb_nmi_tty_driver;
-}
-
-static struct console kgdb_nmi_console = {
- .name = "ttyNMI",
- .setup = kgdb_nmi_console_setup,
- .write = kgdb_nmi_console_write,
- .device = kgdb_nmi_console_device,
- .flags = CON_PRINTBUFFER | CON_ANYTIME,
- .index = -1,
-};
-
-/*
- * This is usually the maximum rate on debug ports. We make fifo large enough
- * to make copy-pasting to the terminal usable.
- */
-#define KGDB_NMI_BAUD 115200
-#define KGDB_NMI_FIFO_SIZE roundup_pow_of_two(KGDB_NMI_BAUD / 8 / HZ)
-
-struct kgdb_nmi_tty_priv {
- struct tty_port port;
- struct timer_list timer;
- STRUCT_KFIFO(char, KGDB_NMI_FIFO_SIZE) fifo;
-};
-
-static struct tty_port *kgdb_nmi_port;
-
-/*
- * The tasklet is cheap, it does not cause wakeups when reschedules itself,
- * instead it waits for the next tick.
- */
-static void kgdb_nmi_tty_receiver(struct timer_list *t)
-{
- struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer);
- char ch;
-
- priv->timer.expires = jiffies + (HZ/100);
- add_timer(&priv->timer);
-
- if (likely(!atomic_read(&kgdb_nmi_num_readers) ||
- !kfifo_len(&priv->fifo)))
- return;
-
- while (kfifo_out(&priv->fifo, &ch, 1))
- tty_insert_flip_char(&priv->port, ch, TTY_NORMAL);
- tty_flip_buffer_push(&priv->port);
-}
-
-static int kgdb_nmi_tty_activate(struct tty_port *port, struct tty_struct *tty)
-{
- struct kgdb_nmi_tty_priv *priv =
- container_of(port, struct kgdb_nmi_tty_priv, port);
-
- kgdb_nmi_port = port;
- priv->timer.expires = jiffies + (HZ/100);
- add_timer(&priv->timer);
-
- return 0;
-}
-
-static void kgdb_nmi_tty_shutdown(struct tty_port *port)
-{
- struct kgdb_nmi_tty_priv *priv =
- container_of(port, struct kgdb_nmi_tty_priv, port);
-
- del_timer(&priv->timer);
- kgdb_nmi_port = NULL;
-}
-
-static const struct tty_port_operations kgdb_nmi_tty_port_ops = {
- .activate = kgdb_nmi_tty_activate,
- .shutdown = kgdb_nmi_tty_shutdown,
-};
-
-static int kgdb_nmi_tty_install(struct tty_driver *drv, struct tty_struct *tty)
-{
- struct kgdb_nmi_tty_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- INIT_KFIFO(priv->fifo);
- timer_setup(&priv->timer, kgdb_nmi_tty_receiver, 0);
- tty_port_init(&priv->port);
- priv->port.ops = &kgdb_nmi_tty_port_ops;
- tty->driver_data = priv;
-
- ret = tty_port_install(&priv->port, drv, tty);
- if (ret) {
- pr_err("%s: can't install tty port: %d\n", __func__, ret);
- goto err;
- }
- return 0;
-err:
- tty_port_destroy(&priv->port);
- kfree(priv);
- return ret;
-}
-
-static void kgdb_nmi_tty_cleanup(struct tty_struct *tty)
-{
- struct kgdb_nmi_tty_priv *priv = tty->driver_data;
-
- tty->driver_data = NULL;
- tty_port_destroy(&priv->port);
- kfree(priv);
-}
-
-static int kgdb_nmi_tty_open(struct tty_struct *tty, struct file *file)
-{
- struct kgdb_nmi_tty_priv *priv = tty->driver_data;
- unsigned int mode = file->f_flags & O_ACCMODE;
- int ret;
-
- ret = tty_port_open(&priv->port, tty, file);
- if (!ret && (mode == O_RDONLY || mode == O_RDWR))
- atomic_inc(&kgdb_nmi_num_readers);
-
- return ret;
-}
-
-static void kgdb_nmi_tty_close(struct tty_struct *tty, struct file *file)
-{
- struct kgdb_nmi_tty_priv *priv = tty->driver_data;
- unsigned int mode = file->f_flags & O_ACCMODE;
-
- if (mode == O_RDONLY || mode == O_RDWR)
- atomic_dec(&kgdb_nmi_num_readers);
-
- tty_port_close(&priv->port, tty, file);
-}
-
-static void kgdb_nmi_tty_hangup(struct tty_struct *tty)
-{
- struct kgdb_nmi_tty_priv *priv = tty->driver_data;
-
- tty_port_hangup(&priv->port);
-}
-
-static unsigned int kgdb_nmi_tty_write_room(struct tty_struct *tty)
-{
- /* Actually, we can handle any amount as we use polled writes. */
- return 2048;
-}
-
-static ssize_t kgdb_nmi_tty_write(struct tty_struct *tty, const u8 *buf,
- size_t c)
-{
- int i;
-
- for (i = 0; i < c; i++)
- dbg_io_ops->write_char(buf[i]);
- return c;
-}
-
-static const struct tty_operations kgdb_nmi_tty_ops = {
- .open = kgdb_nmi_tty_open,
- .close = kgdb_nmi_tty_close,
- .install = kgdb_nmi_tty_install,
- .cleanup = kgdb_nmi_tty_cleanup,
- .hangup = kgdb_nmi_tty_hangup,
- .write_room = kgdb_nmi_tty_write_room,
- .write = kgdb_nmi_tty_write,
-};
-
-int kgdb_register_nmi_console(void)
-{
- int ret;
-
- if (!arch_kgdb_ops.enable_nmi)
- return 0;
-
- kgdb_nmi_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
- if (IS_ERR(kgdb_nmi_tty_driver)) {
- pr_err("%s: cannot allocate tty\n", __func__);
- return PTR_ERR(kgdb_nmi_tty_driver);
- }
- kgdb_nmi_tty_driver->driver_name = "ttyNMI";
- kgdb_nmi_tty_driver->name = "ttyNMI";
- kgdb_nmi_tty_driver->num = 1;
- kgdb_nmi_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- kgdb_nmi_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- kgdb_nmi_tty_driver->init_termios = tty_std_termios;
- tty_termios_encode_baud_rate(&kgdb_nmi_tty_driver->init_termios,
- KGDB_NMI_BAUD, KGDB_NMI_BAUD);
- tty_set_operations(kgdb_nmi_tty_driver, &kgdb_nmi_tty_ops);
-
- ret = tty_register_driver(kgdb_nmi_tty_driver);
- if (ret) {
- pr_err("%s: can't register tty driver: %d\n", __func__, ret);
- goto err_drv_reg;
- }
-
- register_console(&kgdb_nmi_console);
-
- return 0;
-err_drv_reg:
- tty_driver_kref_put(kgdb_nmi_tty_driver);
- return ret;
-}
-EXPORT_SYMBOL_GPL(kgdb_register_nmi_console);
-
-int kgdb_unregister_nmi_console(void)
-{
- int ret;
-
- if (!arch_kgdb_ops.enable_nmi)
- return 0;
- arch_kgdb_ops.enable_nmi(0);
-
- ret = unregister_console(&kgdb_nmi_console);
- if (ret)
- return ret;
-
- tty_unregister_driver(kgdb_nmi_tty_driver);
- tty_driver_kref_put(kgdb_nmi_tty_driver);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(kgdb_unregister_nmi_console);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 58ea1e1391ce..85f6c5a76e0f 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -186,8 +186,6 @@ static void cleanup_kgdboc(void)
if (configured != 1)
return;
- if (kgdb_unregister_nmi_console())
- return;
kgdboc_unregister_kbd();
kgdb_unregister_io_module(&kgdboc_io_ops);
}
@@ -250,16 +248,10 @@ do_register:
if (err)
goto noconfig;
- err = kgdb_register_nmi_console();
- if (err)
- goto nmi_con_failed;
-
configured = 1;
return 0;
-nmi_con_failed:
- kgdb_unregister_io_module(&kgdboc_io_ops);
noconfig:
kgdboc_unregister_kbd();
configured = 0;
diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
index 8dcad52eedfd..285b0fe41a86 100644
--- a/drivers/tty/serial/ma35d1_serial.c
+++ b/drivers/tty/serial/ma35d1_serial.c
@@ -799,7 +799,7 @@ static struct platform_driver ma35d1serial_driver = {
.resume = ma35d1serial_resume,
.driver = {
.name = "ma35d1-uart",
- .of_match_table = of_match_ptr(ma35d1_serial_of_match),
+ .of_match_table = ma35d1_serial_of_match,
},
};
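Dropping of_match_ptr() here leaves ma35d1_serial_of_match referenced unconditionally; when CONFIG_OF is disabled the macro otherwise evaluates to NULL, which would leave the always-built match table unused (and warned about) for no benefit. A rough, paraphrased sketch of the macro as it lives in <linux/of.h> (not part of this patch):

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL
#endif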
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 2204cc3e3b07..37eb701b0b46 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1351,7 +1351,6 @@ static const struct uart_ops mpc52xx_uart_ops = {
.startup = mpc52xx_uart_startup,
.shutdown = mpc52xx_uart_shutdown,
.set_termios = mpc52xx_uart_set_termios,
-/* .pm = mpc52xx_uart_pm, Not supported yet */
.type = mpc52xx_uart_type,
.release_port = mpc52xx_uart_release_port,
.request_port = mpc52xx_uart_request_port,
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index c7cee5fee603..508e8c6f01d4 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1515,7 +1515,6 @@ static const struct uart_ops pch_uart_ops = {
.startup = pch_uart_startup,
.shutdown = pch_uart_shutdown,
.set_termios = pch_uart_set_termios,
-/* .pm = pch_uart_pm, Not supported yet */
.type = pch_uart_type,
.release_port = pch_uart_release_port,
.request_port = pch_uart_request_port,
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d46650e578e5..88669972d9a0 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -895,8 +895,8 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
{
struct uart_port *uport = uart_port_check(state);
unsigned long new_port;
- unsigned int change_irq, change_port, closing_wait;
- unsigned int old_custom_divisor, close_delay;
+ unsigned int old_custom_divisor, close_delay, closing_wait;
+ bool change_irq, change_port;
upf_t old_flags, new_flags;
int retval;
@@ -2013,9 +2013,8 @@ static const char *uart_type(struct uart_port *port)
#ifdef CONFIG_PROC_FS
-static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
+static void uart_line_info(struct seq_file *m, struct uart_state *state)
{
- struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
enum uart_pm_state pm_state;
struct uart_port *uport;
@@ -2100,7 +2099,7 @@ static int uart_proc_show(struct seq_file *m, void *v)
seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n", "", "", "");
for (i = 0; i < drv->nr; i++)
- uart_line_info(m, drv, i);
+ uart_line_info(m, drv->state + i);
return 0;
}
#endif
@@ -3156,7 +3155,6 @@ static int serial_core_add_one_port(struct uart_driver *drv, struct uart_port *u
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
- tty_port_link_device(port, drv->tty_driver, uport->line);
uart_configure_port(drv, state, uport);
port->console = uart_console(uport);
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 8855688a5b6c..7b02c5ca4afd 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -217,7 +217,7 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
*
* This will get the {cts,rts,...}-gpios from device tree if they are present
* and request them, set direction etc, and return an allocated structure.
- * `devm_*` functions are used, so there's no need to call mctrl_gpio_free().
+ * `devm_*` functions are used, so there's no need to explicitly free.
* As this sets up the irq handling, make sure to not handle changes to the
* gpio input lines in your driver, too.
*/
@@ -268,32 +268,6 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
EXPORT_SYMBOL_GPL(mctrl_gpio_init);
/**
- * mctrl_gpio_free - explicitly free uart gpios
- * @dev: uart port's device
- * @gpios: gpios structure to be freed
- *
- * This will free the requested gpios in mctrl_gpio_init(). As `devm_*`
- * functions are used, there's generally no need to call this function.
- */
-void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
-{
- enum mctrl_gpio_idx i;
-
- if (gpios == NULL)
- return;
-
- for (i = 0; i < UART_GPIO_MAX; i++) {
- if (gpios->irq[i])
- devm_free_irq(gpios->port->dev, gpios->irq[i], gpios);
-
- if (gpios->gpio[i])
- devm_gpiod_put(dev, gpios->gpio[i]);
- }
- devm_kfree(dev, gpios);
-}
-EXPORT_SYMBOL_GPL(mctrl_gpio_free);
-
-/**
* mctrl_gpio_enable_ms - enable irqs and handling of changes to the ms lines
* @gpios: gpios to enable
*/
@@ -322,11 +296,7 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
}
EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms);
-/**
- * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines
- * @gpios: gpios to disable
- */
-void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
+static void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios, bool sync)
{
enum mctrl_gpio_idx i;
@@ -342,10 +312,34 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
if (!gpios->irq[i])
continue;
- disable_irq(gpios->irq[i]);
+ if (sync)
+ disable_irq(gpios->irq[i]);
+ else
+ disable_irq_nosync(gpios->irq[i]);
}
}
-EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms);
+
+/**
+ * mctrl_gpio_disable_ms_sync - disable irqs and handling of changes to the ms
+ * lines, and wait for any pending IRQ to be processed
+ * @gpios: gpios to disable
+ */
+void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios)
+{
+ mctrl_gpio_disable_ms(gpios, true);
+}
+EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_sync);
+
+/**
+ * mctrl_gpio_disable_ms_no_sync - disable irqs and handling of changes to the
+ * ms lines, and return immediately
+ * @gpios: gpios to disable
+ */
+void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios)
+{
+ mctrl_gpio_disable_ms(gpios, false);
+}
+EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_no_sync);
void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios)
{
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index fc76910fb105..961e4ba0c6f8 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -59,7 +59,7 @@ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
/*
* Request and set direction of modem control line GPIOs and set up irq
* handling.
- * devm_* functions are used, so there's no need to call mctrl_gpio_free().
+ * devm_* functions are used, so there's no need to explicitly free.
* Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on
* allocation error.
*/
@@ -67,7 +67,7 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx);
/*
* Request and set direction of modem control line GPIOs.
- * devm_* functions are used, so there's no need to call mctrl_gpio_free().
+ * devm_* functions are used, so there's no need to explicitly free.
* Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on
* allocation error.
*/
@@ -75,21 +75,21 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev,
unsigned int idx);
/*
- * Free the mctrl_gpios structure.
- * Normally, this function will not be called, as the GPIOs will
- * be disposed of by the resource management code.
+ * Enable gpio interrupts to report status line changes.
*/
-void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios);
+void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios);
/*
- * Enable gpio interrupts to report status line changes.
+ * Disable gpio interrupts to report status line changes, and block until
+ * any corresponding IRQ is processed
*/
-void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios);
+void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios);
/*
- * Disable gpio interrupts to report status line changes.
+ * Disable gpio interrupts to report status line changes, and return
+ * immediately
*/
-void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios);
+void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios);
/*
* Enable gpio wakeup interrupts to enable wake up source.
@@ -139,16 +139,15 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
return NULL;
}
-static inline
-void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
+static inline void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
{
}
-static inline void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
+static inline void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios)
{
}
-static inline void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
+static inline void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios)
{
}
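With mctrl_gpio_free() gone and mctrl_gpio_disable_ms() split into _sync/_no_sync variants, callers now pick the variant by context: the _sync form waits for a running modem-status irq handler to finish (as the imx and sh-sci shutdown paths above now do), while _no_sync returns immediately and is presumably meant for atomic or lock-held contexts where waiting is not allowed. A minimal sketch of a hypothetical consumer; the "foo" names are illustrative and not taken from this series:

#include <linux/container_of.h>
#include <linux/serial_core.h>
#include "serial_mctrl_gpio.h"

struct foo_uart {
	struct uart_port port;
	struct mctrl_gpios *gpios;
};

static int foo_startup(struct uart_port *port)
{
	struct foo_uart *foo = container_of(port, struct foo_uart, port);

	/* Report modem-status line changes via the mctrl GPIO irqs. */
	mctrl_gpio_enable_ms(foo->gpios);
	return 0;
}

static void foo_shutdown(struct uart_port *port)
{
	struct foo_uart *foo = container_of(port, struct foo_uart, port);

	/* Not in atomic context here, so it is safe to wait: use _sync. */
	mctrl_gpio_disable_ms_sync(foo->gpios);
}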
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index b72c3bc19bfa..1c8480d0338e 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -104,6 +104,20 @@ struct plat_sci_reg {
u8 offset, size;
};
+struct sci_suspend_regs {
+ u16 scdl;
+ u16 sccks;
+ u16 scsmr;
+ u16 scscr;
+ u16 scfcr;
+ u16 scsptr;
+ u16 hssrr;
+ u16 scpcr;
+ u16 scpdr;
+ u8 scbrr;
+ u8 semr;
+};
+
struct sci_port_params {
const struct plat_sci_reg regs[SCIx_NR_REGS];
unsigned int fifosize;
@@ -134,6 +148,8 @@ struct sci_port {
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
+ struct reset_control *rstc;
+
#ifdef CONFIG_SERIAL_SH_SCI_DMA
struct dma_chan *chan_tx_saved;
struct dma_chan *chan_rx_saved;
@@ -153,6 +169,7 @@ struct sci_port {
int rx_trigger;
struct timer_list rx_fifo_timer;
int rx_fifo_timeout;
+ struct sci_suspend_regs suspend_regs;
u16 hscif_tot;
bool has_rtscts;
@@ -2297,7 +2314,7 @@ static void sci_shutdown(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
s->autorts = false;
- mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
+ mctrl_gpio_disable_ms_sync(to_sci_port(port)->gpios);
uart_port_lock_irqsave(port, &flags);
sci_stop_rx(port);
@@ -3373,6 +3390,7 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
}
sp = &sci_ports[id];
+ sp->rstc = rstc;
*dev_id = id;
p->type = SCI_OF_TYPE(data);
@@ -3545,13 +3563,77 @@ static int sci_probe(struct platform_device *dev)
return 0;
}
+static void sci_console_save(struct sci_port *s)
+{
+ struct sci_suspend_regs *regs = &s->suspend_regs;
+ struct uart_port *port = &s->port;
+
+ if (sci_getreg(port, SCDL)->size)
+ regs->scdl = sci_serial_in(port, SCDL);
+ if (sci_getreg(port, SCCKS)->size)
+ regs->sccks = sci_serial_in(port, SCCKS);
+ if (sci_getreg(port, SCSMR)->size)
+ regs->scsmr = sci_serial_in(port, SCSMR);
+ if (sci_getreg(port, SCSCR)->size)
+ regs->scscr = sci_serial_in(port, SCSCR);
+ if (sci_getreg(port, SCFCR)->size)
+ regs->scfcr = sci_serial_in(port, SCFCR);
+ if (sci_getreg(port, SCSPTR)->size)
+ regs->scsptr = sci_serial_in(port, SCSPTR);
+ if (sci_getreg(port, SCBRR)->size)
+ regs->scbrr = sci_serial_in(port, SCBRR);
+ if (sci_getreg(port, HSSRR)->size)
+ regs->hssrr = sci_serial_in(port, HSSRR);
+ if (sci_getreg(port, SCPCR)->size)
+ regs->scpcr = sci_serial_in(port, SCPCR);
+ if (sci_getreg(port, SCPDR)->size)
+ regs->scpdr = sci_serial_in(port, SCPDR);
+ if (sci_getreg(port, SEMR)->size)
+ regs->semr = sci_serial_in(port, SEMR);
+}
+
+static void sci_console_restore(struct sci_port *s)
+{
+ struct sci_suspend_regs *regs = &s->suspend_regs;
+ struct uart_port *port = &s->port;
+
+ if (sci_getreg(port, SCDL)->size)
+ sci_serial_out(port, SCDL, regs->scdl);
+ if (sci_getreg(port, SCCKS)->size)
+ sci_serial_out(port, SCCKS, regs->sccks);
+ if (sci_getreg(port, SCSMR)->size)
+ sci_serial_out(port, SCSMR, regs->scsmr);
+ if (sci_getreg(port, SCSCR)->size)
+ sci_serial_out(port, SCSCR, regs->scscr);
+ if (sci_getreg(port, SCFCR)->size)
+ sci_serial_out(port, SCFCR, regs->scfcr);
+ if (sci_getreg(port, SCSPTR)->size)
+ sci_serial_out(port, SCSPTR, regs->scsptr);
+ if (sci_getreg(port, SCBRR)->size)
+ sci_serial_out(port, SCBRR, regs->scbrr);
+ if (sci_getreg(port, HSSRR)->size)
+ sci_serial_out(port, HSSRR, regs->hssrr);
+ if (sci_getreg(port, SCPCR)->size)
+ sci_serial_out(port, SCPCR, regs->scpcr);
+ if (sci_getreg(port, SCPDR)->size)
+ sci_serial_out(port, SCPDR, regs->scpdr);
+ if (sci_getreg(port, SEMR)->size)
+ sci_serial_out(port, SEMR, regs->semr);
+}
+
static __maybe_unused int sci_suspend(struct device *dev)
{
struct sci_port *sport = dev_get_drvdata(dev);
- if (sport)
+ if (sport) {
uart_suspend_port(&sci_uart_driver, &sport->port);
+ if (!console_suspend_enabled && uart_console(&sport->port))
+ sci_console_save(sport);
+ else
+ return reset_control_assert(sport->rstc);
+ }
+
return 0;
}
@@ -3559,8 +3641,18 @@ static __maybe_unused int sci_resume(struct device *dev)
{
struct sci_port *sport = dev_get_drvdata(dev);
- if (sport)
+ if (sport) {
+ if (!console_suspend_enabled && uart_console(&sport->port)) {
+ sci_console_restore(sport);
+ } else {
+ int ret = reset_control_deassert(sport->rstc);
+
+ if (ret)
+ return ret;
+ }
+
uart_resume_port(&sci_uart_driver, &sport->port);
+ }
return 0;
}
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 1ec5d8c3aef8..ad06b760cfca 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -944,7 +944,7 @@ static void stm32_usart_enable_ms(struct uart_port *port)
static void stm32_usart_disable_ms(struct uart_port *port)
{
- mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
+ mctrl_gpio_disable_ms_sync(to_stm32_port(port)->gpios);
}
/* Transmit stop */
@@ -965,10 +965,8 @@ static void stm32_usart_start_tx(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
- if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char) {
- stm32_usart_rs485_rts_disable(port);
+ if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char)
return;
- }
stm32_usart_rs485_rts_enable(port);
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 7f0fef07e141..383141fe7ba0 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -151,16 +151,6 @@ static void serial_out(struct uart_sunsu_port *up, int offset, int value)
}
/*
- * We used to support using pause I/O for certain machines. We
- * haven't supported this for a while, but just in case it's badly
- * needed for certain old 386 machines, I've left these #define's
- * in....
- */
-#define serial_inp(up, offset) serial_in(up, offset)
-#define serial_outp(up, offset, value) serial_out(up, offset, value)
-
-
-/*
* For the 16C950
*/
static void serial_icr_write(struct uart_sunsu_port *up, int offset, int value)
@@ -169,20 +159,6 @@ static void serial_icr_write(struct uart_sunsu_port *up, int offset, int value)
serial_out(up, UART_ICR, value);
}
-#if 0 /* Unused currently */
-static unsigned int serial_icr_read(struct uart_sunsu_port *up, int offset)
-{
- unsigned int value;
-
- serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
- serial_out(up, UART_SCR, offset);
- value = serial_in(up, UART_ICR);
- serial_icr_write(up, UART_ACR, up->acr);
-
- return value;
-}
-#endif
-
#ifdef CONFIG_SERIAL_8250_RSA
/*
* Attempts to turn on the RSA FIFO. Returns zero on failure.
@@ -193,12 +169,12 @@ static int __enable_rsa(struct uart_sunsu_port *up)
unsigned char mode;
int result;
- mode = serial_inp(up, UART_RSA_MSR);
+ mode = serial_in(up, UART_RSA_MSR);
result = mode & UART_RSA_MSR_FIFO;
if (!result) {
- serial_outp(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
- mode = serial_inp(up, UART_RSA_MSR);
+ serial_out(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
+ mode = serial_in(up, UART_RSA_MSR);
result = mode & UART_RSA_MSR_FIFO;
}
@@ -217,7 +193,7 @@ static void enable_rsa(struct uart_sunsu_port *up)
uart_port_unlock_irq(&up->port);
}
if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
- serial_outp(up, UART_RSA_FRR, 0);
+ serial_out(up, UART_RSA_FRR, 0);
}
}
@@ -236,12 +212,12 @@ static void disable_rsa(struct uart_sunsu_port *up)
up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
uart_port_lock_irq(&up->port);
- mode = serial_inp(up, UART_RSA_MSR);
+ mode = serial_in(up, UART_RSA_MSR);
result = !(mode & UART_RSA_MSR_FIFO);
if (!result) {
- serial_outp(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
- mode = serial_inp(up, UART_RSA_MSR);
+ serial_out(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
+ mode = serial_in(up, UART_RSA_MSR);
result = !(mode & UART_RSA_MSR_FIFO);
}
@@ -326,7 +302,7 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status)
int saw_console_brk = 0;
do {
- ch = serial_inp(up, UART_RX);
+ ch = serial_in(up, UART_RX);
flag = TTY_NORMAL;
up->port.icount.rx++;
@@ -387,7 +363,7 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status)
*/
tty_insert_flip_char(port, 0, TTY_OVERRUN);
ignore_char:
- *status = serial_inp(up, UART_LSR);
+ *status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
if (saw_console_brk)
@@ -401,7 +377,7 @@ static void transmit_chars(struct uart_sunsu_port *up)
int count;
if (up->port.x_char) {
- serial_outp(up, UART_TX, up->port.x_char);
+ serial_out(up, UART_TX, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
return;
@@ -460,7 +436,7 @@ static irqreturn_t sunsu_serial_interrupt(int irq, void *dev_id)
uart_port_lock_irqsave(&up->port, &flags);
do {
- status = serial_inp(up, UART_LSR);
+ status = serial_in(up, UART_LSR);
if (status & UART_LSR_DR)
receive_chars(up, &status);
check_modem_status(up);
@@ -498,7 +474,7 @@ static void sunsu_change_mouse_baud(struct uart_sunsu_port *up)
static void receive_kbd_ms_chars(struct uart_sunsu_port *up, int is_break)
{
do {
- unsigned char ch = serial_inp(up, UART_RX);
+ unsigned char ch = serial_in(up, UART_RX);
/* Stop-A is handled by drivers/char/keyboard.c now. */
if (up->su_type == SU_PORT_KBD) {
@@ -530,7 +506,7 @@ static irqreturn_t sunsu_kbd_ms_interrupt(int irq, void *dev_id)
struct uart_sunsu_port *up = dev_id;
if (!(serial_in(up, UART_IIR) & UART_IIR_NO_INT)) {
- unsigned char status = serial_inp(up, UART_LSR);
+ unsigned char status = serial_in(up, UART_LSR);
if ((status & UART_LSR_DR) || (status & UART_LSR_BI))
receive_kbd_ms_chars(up, (status & UART_LSR_BI) != 0);
@@ -619,14 +595,14 @@ static int sunsu_startup(struct uart_port *port)
if (up->port.type == PORT_16C950) {
/* Wake up and initialize UART */
up->acr = 0;
- serial_outp(up, UART_LCR, 0xBF);
- serial_outp(up, UART_EFR, UART_EFR_ECB);
- serial_outp(up, UART_IER, 0);
- serial_outp(up, UART_LCR, 0);
+ serial_out(up, UART_LCR, 0xBF);
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_IER, 0);
+ serial_out(up, UART_LCR, 0);
serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
- serial_outp(up, UART_LCR, 0xBF);
- serial_outp(up, UART_EFR, UART_EFR_ECB);
- serial_outp(up, UART_LCR, 0);
+ serial_out(up, UART_LCR, 0xBF);
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0);
}
#ifdef CONFIG_SERIAL_8250_RSA
@@ -642,19 +618,19 @@ static int sunsu_startup(struct uart_port *port)
* (they will be reenabled in set_termios())
*/
if (uart_config[up->port.type].flags & UART_CLEAR_FIFO) {
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
- serial_outp(up, UART_FCR, 0);
+ serial_out(up, UART_FCR, 0);
}
/*
* Clear the interrupt registers.
*/
- (void) serial_inp(up, UART_LSR);
- (void) serial_inp(up, UART_RX);
- (void) serial_inp(up, UART_IIR);
- (void) serial_inp(up, UART_MSR);
+ (void) serial_in(up, UART_LSR);
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
/*
* At this point, there's no way the LSR could still be 0xff;
@@ -662,7 +638,7 @@ static int sunsu_startup(struct uart_port *port)
* here.
*/
if (!(up->port.flags & UPF_BUGGY_UART) &&
- (serial_inp(up, UART_LSR) == 0xff)) {
+ (serial_in(up, UART_LSR) == 0xff)) {
printk("ttyS%d: LSR safety check engaged!\n", up->port.line);
return -ENODEV;
}
@@ -682,7 +658,7 @@ static int sunsu_startup(struct uart_port *port)
/*
* Now, initialize the UART
*/
- serial_outp(up, UART_LCR, UART_LCR_WLEN8);
+ serial_out(up, UART_LCR, UART_LCR_WLEN8);
uart_port_lock_irqsave(&up->port, &flags);
@@ -697,7 +673,7 @@ static int sunsu_startup(struct uart_port *port)
* anyway, so we don't enable them here.
*/
up->ier = UART_IER_RLSI | UART_IER_RDI;
- serial_outp(up, UART_IER, up->ier);
+ serial_out(up, UART_IER, up->ier);
if (up->port.flags & UPF_FOURPORT) {
unsigned int icp;
@@ -712,10 +688,10 @@ static int sunsu_startup(struct uart_port *port)
/*
* And clear the interrupt registers again for luck.
*/
- (void) serial_inp(up, UART_LSR);
- (void) serial_inp(up, UART_RX);
- (void) serial_inp(up, UART_IIR);
- (void) serial_inp(up, UART_MSR);
+ (void) serial_in(up, UART_LSR);
+ (void) serial_in(up, UART_RX);
+ (void) serial_in(up, UART_IIR);
+ (void) serial_in(up, UART_MSR);
return 0;
}
@@ -730,7 +706,7 @@ static void sunsu_shutdown(struct uart_port *port)
* Disable interrupts from this port
*/
up->ier = 0;
- serial_outp(up, UART_IER, 0);
+ serial_out(up, UART_IER, 0);
uart_port_lock_irqsave(&up->port, &flags);
if (up->port.flags & UPF_FOURPORT) {
@@ -746,11 +722,11 @@ static void sunsu_shutdown(struct uart_port *port)
/*
* Disable break condition and FIFOs
*/
- serial_out(up, UART_LCR, serial_inp(up, UART_LCR) & ~UART_LCR_SBC);
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR |
UART_FCR_CLEAR_XMIT);
- serial_outp(up, UART_FCR, 0);
+ serial_out(up, UART_FCR, 0);
#ifdef CONFIG_SERIAL_8250_RSA
/*
@@ -872,22 +848,22 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
serial_out(up, UART_IER, up->ier);
if (uart_config[up->port.type].flags & UART_STARTECH) {
- serial_outp(up, UART_LCR, 0xBF);
- serial_outp(up, UART_EFR, cflag & CRTSCTS ? UART_EFR_CTS :0);
+ serial_out(up, UART_LCR, 0xBF);
+ serial_out(up, UART_EFR, cflag & CRTSCTS ? UART_EFR_CTS :0);
}
- serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
- serial_outp(up, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_outp(up, UART_DLM, quot >> 8); /* MS of divisor */
+ serial_out(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
+ serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
+ serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
if (up->port.type == PORT_16750)
- serial_outp(up, UART_FCR, fcr); /* set fcr */
- serial_outp(up, UART_LCR, cval); /* reset DLAB */
+ serial_out(up, UART_FCR, fcr); /* set fcr */
+ serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval; /* Save LCR */
if (up->port.type != PORT_16750) {
if (fcr & UART_FCR_ENABLE_FIFO) {
/* emulated UARTs (Lucent Venus 167x) need two steps */
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
}
- serial_outp(up, UART_FCR, fcr); /* set fcr */
+ serial_out(up, UART_FCR, fcr); /* set fcr */
}
up->cflag = cflag;
@@ -1051,18 +1027,18 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
* 0x80 is a non-existent port; which should be safe since
* include/asm/io.h also makes this assumption.
*/
- scratch = serial_inp(up, UART_IER);
- serial_outp(up, UART_IER, 0);
+ scratch = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0);
#ifdef __i386__
outb(0xff, 0x080);
#endif
- scratch2 = serial_inp(up, UART_IER);
- serial_outp(up, UART_IER, 0x0f);
+ scratch2 = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, 0x0f);
#ifdef __i386__
outb(0, 0x080);
#endif
- scratch3 = serial_inp(up, UART_IER);
- serial_outp(up, UART_IER, scratch);
+ scratch3 = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, scratch);
if (scratch2 != 0 || scratch3 != 0x0F)
goto out; /* We failed; there's nothing here */
}
@@ -1080,16 +1056,16 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
* that conflicts with COM 1-4 --- we hope!
*/
if (!(up->port.flags & UPF_SKIP_TEST)) {
- serial_outp(up, UART_MCR, UART_MCR_LOOP | 0x0A);
- status1 = serial_inp(up, UART_MSR) & 0xF0;
- serial_outp(up, UART_MCR, save_mcr);
+ serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+ status1 = serial_in(up, UART_MSR) & 0xF0;
+ serial_out(up, UART_MCR, save_mcr);
if (status1 != 0x90)
goto out; /* We failed loopback test */
}
- serial_outp(up, UART_LCR, 0xBF); /* set up for StarTech test */
- serial_outp(up, UART_EFR, 0); /* EFR is the same as FCR */
- serial_outp(up, UART_LCR, 0);
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_LCR, 0xBF); /* set up for StarTech test */
+ serial_out(up, UART_EFR, 0); /* EFR is the same as FCR */
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
scratch = serial_in(up, UART_IIR) >> 6;
switch (scratch) {
case 0:
@@ -1107,19 +1083,19 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
}
if (up->port.type == PORT_16550A) {
/* Check for Startech UART's */
- serial_outp(up, UART_LCR, UART_LCR_DLAB);
+ serial_out(up, UART_LCR, UART_LCR_DLAB);
if (serial_in(up, UART_EFR) == 0) {
up->port.type = PORT_16650;
} else {
- serial_outp(up, UART_LCR, 0xBF);
+ serial_out(up, UART_LCR, 0xBF);
if (serial_in(up, UART_EFR) == 0)
up->port.type = PORT_16650V2;
}
}
if (up->port.type == PORT_16550A) {
/* Check for TI 16750 */
- serial_outp(up, UART_LCR, save_lcr | UART_LCR_DLAB);
- serial_outp(up, UART_FCR,
+ serial_out(up, UART_LCR, save_lcr | UART_LCR_DLAB);
+ serial_out(up, UART_FCR,
UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
scratch = serial_in(up, UART_IIR) >> 5;
if (scratch == 7) {
@@ -1129,24 +1105,24 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
* mode if the UART_FCR7_64BYTE bit was set
* while UART_LCR_DLAB was latched.
*/
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- serial_outp(up, UART_LCR, 0);
- serial_outp(up, UART_FCR,
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_LCR, 0);
+ serial_out(up, UART_FCR,
UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
scratch = serial_in(up, UART_IIR) >> 5;
if (scratch == 6)
up->port.type = PORT_16750;
}
- serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
}
- serial_outp(up, UART_LCR, save_lcr);
+ serial_out(up, UART_LCR, save_lcr);
if (up->port.type == PORT_16450) {
scratch = serial_in(up, UART_SCR);
- serial_outp(up, UART_SCR, 0xa5);
+ serial_out(up, UART_SCR, 0xa5);
status1 = serial_in(up, UART_SCR);
- serial_outp(up, UART_SCR, 0x5a);
+ serial_out(up, UART_SCR, 0x5a);
status2 = serial_in(up, UART_SCR);
- serial_outp(up, UART_SCR, scratch);
+ serial_out(up, UART_SCR, scratch);
if ((status1 != 0xa5) || (status2 != 0x5a))
up->port.type = PORT_8250;
@@ -1163,15 +1139,15 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
*/
#ifdef CONFIG_SERIAL_8250_RSA
if (up->port.type == PORT_RSA)
- serial_outp(up, UART_RSA_FRR, 0);
+ serial_out(up, UART_RSA_FRR, 0);
#endif
- serial_outp(up, UART_MCR, save_mcr);
- serial_outp(up, UART_FCR, (UART_FCR_ENABLE_FIFO |
+ serial_out(up, UART_MCR, save_mcr);
+ serial_out(up, UART_FCR, (UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR |
UART_FCR_CLEAR_XMIT));
- serial_outp(up, UART_FCR, 0);
+ serial_out(up, UART_FCR, 0);
(void)serial_in(up, UART_RX);
- serial_outp(up, UART_IER, 0);
+ serial_out(up, UART_IER, 0);
out:
uart_port_unlock_irqrestore(&up->port, flags);
diff --git a/drivers/tty/serial/tegra-utc.c b/drivers/tty/serial/tegra-utc.c
new file mode 100644
index 000000000000..39b14fe813c9
--- /dev/null
+++ b/drivers/tty/serial/tegra-utc.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// NVIDIA Tegra UTC (UART Trace Controller) driver.
+
+#include <linux/bits.h>
+#include <linux/console.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/types.h>
+
+#define TEGRA_UTC_ENABLE 0x000
+#define TEGRA_UTC_ENABLE_CLIENT_ENABLE BIT(0)
+
+#define TEGRA_UTC_FIFO_THRESHOLD 0x008
+
+#define TEGRA_UTC_COMMAND 0x00c
+#define TEGRA_UTC_COMMAND_RESET BIT(0)
+#define TEGRA_UTC_COMMAND_FLUSH BIT(1)
+
+#define TEGRA_UTC_DATA 0x020
+
+#define TEGRA_UTC_FIFO_STATUS 0x100
+#define TEGRA_UTC_FIFO_EMPTY BIT(0)
+#define TEGRA_UTC_FIFO_FULL BIT(1)
+#define TEGRA_UTC_FIFO_REQ BIT(2)
+#define TEGRA_UTC_FIFO_OVERFLOW BIT(3)
+#define TEGRA_UTC_FIFO_TIMEOUT BIT(4)
+
+#define TEGRA_UTC_FIFO_OCCUPANCY 0x104
+
+#define TEGRA_UTC_INTR_STATUS 0x108
+#define TEGRA_UTC_INTR_SET 0x10c
+#define TEGRA_UTC_INTR_MASK 0x110
+#define TEGRA_UTC_INTR_CLEAR 0x114
+#define TEGRA_UTC_INTR_EMPTY BIT(0)
+#define TEGRA_UTC_INTR_FULL BIT(1)
+#define TEGRA_UTC_INTR_REQ BIT(2)
+#define TEGRA_UTC_INTR_OVERFLOW BIT(3)
+#define TEGRA_UTC_INTR_TIMEOUT BIT(4)
+
+#define TEGRA_UTC_UART_NR 16
+
+#define TEGRA_UTC_INTR_COMMON (TEGRA_UTC_INTR_REQ | TEGRA_UTC_INTR_FULL | TEGRA_UTC_INTR_EMPTY)
+
+struct tegra_utc_port {
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_UTC_CONSOLE)
+ struct console console;
+#endif
+ struct uart_port port;
+
+ void __iomem *rx_base;
+ void __iomem *tx_base;
+
+ u32 tx_irqmask;
+ u32 rx_irqmask;
+
+ unsigned int fifosize;
+ u32 tx_threshold;
+ u32 rx_threshold;
+};
+
+static u32 tegra_utc_rx_readl(struct tegra_utc_port *tup, unsigned int offset)
+{
+ void __iomem *addr = tup->rx_base + offset;
+
+ return readl_relaxed(addr);
+}
+
+static void tegra_utc_rx_writel(struct tegra_utc_port *tup, u32 val, unsigned int offset)
+{
+ void __iomem *addr = tup->rx_base + offset;
+
+ writel_relaxed(val, addr);
+}
+
+static u32 tegra_utc_tx_readl(struct tegra_utc_port *tup, unsigned int offset)
+{
+ void __iomem *addr = tup->tx_base + offset;
+
+ return readl_relaxed(addr);
+}
+
+static void tegra_utc_tx_writel(struct tegra_utc_port *tup, u32 val, unsigned int offset)
+{
+ void __iomem *addr = tup->tx_base + offset;
+
+ writel_relaxed(val, addr);
+}
+
+static void tegra_utc_enable_tx_irq(struct tegra_utc_port *tup)
+{
+ tup->tx_irqmask = TEGRA_UTC_INTR_REQ;
+
+ tegra_utc_tx_writel(tup, tup->tx_irqmask, TEGRA_UTC_INTR_MASK);
+ tegra_utc_tx_writel(tup, tup->tx_irqmask, TEGRA_UTC_INTR_SET);
+}
+
+static void tegra_utc_disable_tx_irq(struct tegra_utc_port *tup)
+{
+ tup->tx_irqmask = 0x0;
+
+ tegra_utc_tx_writel(tup, tup->tx_irqmask, TEGRA_UTC_INTR_MASK);
+ tegra_utc_tx_writel(tup, tup->tx_irqmask, TEGRA_UTC_INTR_SET);
+}
+
+static void tegra_utc_stop_tx(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ tegra_utc_disable_tx_irq(tup);
+}
+
+static void tegra_utc_init_tx(struct tegra_utc_port *tup)
+{
+ /* Disable TX. */
+ tegra_utc_tx_writel(tup, 0x0, TEGRA_UTC_ENABLE);
+
+ /* Update the FIFO Threshold. */
+ tegra_utc_tx_writel(tup, tup->tx_threshold, TEGRA_UTC_FIFO_THRESHOLD);
+
+ /* Clear and mask all the interrupts. */
+ tegra_utc_tx_writel(tup, TEGRA_UTC_INTR_COMMON, TEGRA_UTC_INTR_CLEAR);
+ tegra_utc_disable_tx_irq(tup);
+
+ /* Enable TX. */
+ tegra_utc_tx_writel(tup, TEGRA_UTC_ENABLE_CLIENT_ENABLE, TEGRA_UTC_ENABLE);
+}
+
+static void tegra_utc_init_rx(struct tegra_utc_port *tup)
+{
+ tup->rx_irqmask = TEGRA_UTC_INTR_REQ | TEGRA_UTC_INTR_TIMEOUT;
+
+ tegra_utc_rx_writel(tup, TEGRA_UTC_COMMAND_RESET, TEGRA_UTC_COMMAND);
+ tegra_utc_rx_writel(tup, tup->rx_threshold, TEGRA_UTC_FIFO_THRESHOLD);
+
+ /* Clear all the pending interrupts. */
+ tegra_utc_rx_writel(tup, TEGRA_UTC_INTR_TIMEOUT | TEGRA_UTC_INTR_OVERFLOW |
+ TEGRA_UTC_INTR_COMMON, TEGRA_UTC_INTR_CLEAR);
+ tegra_utc_rx_writel(tup, tup->rx_irqmask, TEGRA_UTC_INTR_MASK);
+ tegra_utc_rx_writel(tup, tup->rx_irqmask, TEGRA_UTC_INTR_SET);
+
+ /* Enable RX. */
+ tegra_utc_rx_writel(tup, TEGRA_UTC_ENABLE_CLIENT_ENABLE, TEGRA_UTC_ENABLE);
+}
+
+static bool tegra_utc_tx_chars(struct tegra_utc_port *tup)
+{
+ struct uart_port *port = &tup->port;
+ unsigned int pending;
+ u8 c;
+
+ pending = uart_port_tx(port, c,
+ !(tegra_utc_tx_readl(tup, TEGRA_UTC_FIFO_STATUS) & TEGRA_UTC_FIFO_FULL),
+ tegra_utc_tx_writel(tup, c, TEGRA_UTC_DATA));
+
+ return pending;
+}
+
+static void tegra_utc_rx_chars(struct tegra_utc_port *tup)
+{
+ struct tty_port *port = &tup->port.state->port;
+ unsigned int max_chars = 256;
+ u32 status;
+ int sysrq;
+ u32 ch;
+
+ while (max_chars--) {
+ status = tegra_utc_rx_readl(tup, TEGRA_UTC_FIFO_STATUS);
+ if (status & TEGRA_UTC_FIFO_EMPTY)
+ break;
+
+ ch = tegra_utc_rx_readl(tup, TEGRA_UTC_DATA);
+ tup->port.icount.rx++;
+
+ if (status & TEGRA_UTC_FIFO_OVERFLOW)
+ tup->port.icount.overrun++;
+
+ uart_port_unlock(&tup->port);
+ sysrq = uart_handle_sysrq_char(&tup->port, ch);
+ uart_port_lock(&tup->port);
+
+ if (!sysrq)
+ tty_insert_flip_char(port, ch, TTY_NORMAL);
+ }
+
+ tty_flip_buffer_push(port);
+}
+
+static irqreturn_t tegra_utc_isr(int irq, void *dev_id)
+{
+ struct tegra_utc_port *tup = dev_id;
+ unsigned int handled = 0;
+ u32 status;
+
+ uart_port_lock(&tup->port);
+
+ /* Process RX_REQ and RX_TIMEOUT interrupts. */
+ do {
+ status = tegra_utc_rx_readl(tup, TEGRA_UTC_INTR_STATUS) & tup->rx_irqmask;
+ if (status) {
+ tegra_utc_rx_writel(tup, tup->rx_irqmask, TEGRA_UTC_INTR_CLEAR);
+ tegra_utc_rx_chars(tup);
+ handled = 1;
+ }
+ } while (status);
+
+ /* Process TX_REQ interrupt. */
+ do {
+ status = tegra_utc_tx_readl(tup, TEGRA_UTC_INTR_STATUS) & tup->tx_irqmask;
+ if (status) {
+ tegra_utc_tx_writel(tup, tup->tx_irqmask, TEGRA_UTC_INTR_CLEAR);
+ tegra_utc_tx_chars(tup);
+ handled = 1;
+ }
+ } while (status);
+
+ uart_port_unlock(&tup->port);
+
+ return IRQ_RETVAL(handled);
+}
+
+static unsigned int tegra_utc_tx_empty(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ return tegra_utc_tx_readl(tup, TEGRA_UTC_FIFO_OCCUPANCY) ? 0 : TIOCSER_TEMT;
+}
+
+static void tegra_utc_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static unsigned int tegra_utc_get_mctrl(struct uart_port *port)
+{
+ return 0;
+}
+
+static void tegra_utc_start_tx(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ if (tegra_utc_tx_chars(tup))
+ tegra_utc_enable_tx_irq(tup);
+}
+
+static void tegra_utc_stop_rx(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ tup->rx_irqmask = 0x0;
+ tegra_utc_rx_writel(tup, tup->rx_irqmask, TEGRA_UTC_INTR_MASK);
+ tegra_utc_rx_writel(tup, tup->rx_irqmask, TEGRA_UTC_INTR_SET);
+}
+
+static void tegra_utc_hw_init(struct tegra_utc_port *tup)
+{
+ tegra_utc_init_tx(tup);
+ tegra_utc_init_rx(tup);
+}
+
+static int tegra_utc_startup(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+ int ret;
+
+ tegra_utc_hw_init(tup);
+
+ /* Interrupt is dedicated to this UTC client. */
+ ret = request_irq(port->irq, tegra_utc_isr, 0, dev_name(port->dev), tup);
+ if (ret < 0)
+ dev_err(port->dev, "failed to register interrupt handler\n");
+
+ return ret;
+}
+
+static void tegra_utc_shutdown(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ tegra_utc_rx_writel(tup, 0x0, TEGRA_UTC_ENABLE);
+ free_irq(port->irq, tup);
+}
+
+static void tegra_utc_set_termios(struct uart_port *port, struct ktermios *termios,
+ const struct ktermios *old)
+{
+ /* The Tegra UTC client supports only the 8-N-1 configuration without HW flow control */
+ termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
+ termios->c_cflag &= ~(CMSPAR | CRTSCTS);
+ termios->c_cflag |= CS8 | CLOCAL;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+
+static int tegra_utc_poll_init(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ tegra_utc_hw_init(tup);
+ return 0;
+}
+
+static int tegra_utc_get_poll_char(struct uart_port *port)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ if (tegra_utc_rx_readl(tup, TEGRA_UTC_FIFO_STATUS) & TEGRA_UTC_FIFO_EMPTY)
+ return NO_POLL_CHAR;
+
+ return tegra_utc_rx_readl(tup, TEGRA_UTC_DATA);
+}
+
+static void tegra_utc_put_poll_char(struct uart_port *port, unsigned char ch)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+ u32 val;
+
+ read_poll_timeout_atomic(tegra_utc_tx_readl, val, !(val & TEGRA_UTC_FIFO_FULL),
+ 0, USEC_PER_SEC, false, tup, TEGRA_UTC_FIFO_STATUS);
+
+ tegra_utc_tx_writel(tup, ch, TEGRA_UTC_DATA);
+}
+
+#endif
+
+static const struct uart_ops tegra_utc_uart_ops = {
+ .tx_empty = tegra_utc_tx_empty,
+ .set_mctrl = tegra_utc_set_mctrl,
+ .get_mctrl = tegra_utc_get_mctrl,
+ .stop_tx = tegra_utc_stop_tx,
+ .start_tx = tegra_utc_start_tx,
+ .stop_rx = tegra_utc_stop_rx,
+ .startup = tegra_utc_startup,
+ .shutdown = tegra_utc_shutdown,
+ .set_termios = tegra_utc_set_termios,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = tegra_utc_poll_init,
+ .poll_get_char = tegra_utc_get_poll_char,
+ .poll_put_char = tegra_utc_put_poll_char,
+#endif
+};
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_UTC_CONSOLE)
+#define TEGRA_UTC_DEFAULT_FIFO_THRESHOLD 4
+#define TEGRA_UTC_EARLYCON_MAX_BURST_SIZE 128
+
+static void tegra_utc_putc(struct uart_port *port, unsigned char c)
+{
+ writel(c, port->membase + TEGRA_UTC_DATA);
+}
+
+static void tegra_utc_early_write(struct console *con, const char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+
+ while (n) {
+ u32 burst_size = TEGRA_UTC_EARLYCON_MAX_BURST_SIZE;
+
+ burst_size -= readl(dev->port.membase + TEGRA_UTC_FIFO_OCCUPANCY);
+ if (n < burst_size)
+ burst_size = n;
+
+ uart_console_write(&dev->port, s, burst_size, tegra_utc_putc);
+
+ n -= burst_size;
+ s += burst_size;
+ }
+}
+
+static int __init tegra_utc_early_console_setup(struct earlycon_device *device, const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ /* Configure TX */
+ writel(TEGRA_UTC_COMMAND_FLUSH | TEGRA_UTC_COMMAND_RESET,
+ device->port.membase + TEGRA_UTC_COMMAND);
+ writel(TEGRA_UTC_DEFAULT_FIFO_THRESHOLD, device->port.membase + TEGRA_UTC_FIFO_THRESHOLD);
+
+ /* Clear and mask all the interrupts. */
+ writel(TEGRA_UTC_INTR_COMMON, device->port.membase + TEGRA_UTC_INTR_CLEAR);
+
+ writel(0x0, device->port.membase + TEGRA_UTC_INTR_MASK);
+ writel(0x0, device->port.membase + TEGRA_UTC_INTR_SET);
+
+ /* Enable TX. */
+ writel(TEGRA_UTC_ENABLE_CLIENT_ENABLE, device->port.membase + TEGRA_UTC_ENABLE);
+
+ device->con->write = tegra_utc_early_write;
+
+ return 0;
+}
+OF_EARLYCON_DECLARE(tegra_utc, "nvidia,tegra264-utc", tegra_utc_early_console_setup);
+
+static void tegra_utc_console_putchar(struct uart_port *port, unsigned char ch)
+{
+ struct tegra_utc_port *tup = container_of(port, struct tegra_utc_port, port);
+
+ tegra_utc_tx_writel(tup, ch, TEGRA_UTC_DATA);
+}
+
+static void tegra_utc_console_write_atomic(struct console *cons, struct nbcon_write_context *wctxt)
+{
+ struct tegra_utc_port *tup = container_of(cons, struct tegra_utc_port, console);
+ unsigned int len;
+ char *outbuf;
+
+ if (!nbcon_enter_unsafe(wctxt))
+ return;
+
+ outbuf = wctxt->outbuf;
+ len = wctxt->len;
+
+ while (len) {
+ u32 burst_size = tup->fifosize;
+
+ burst_size -= tegra_utc_tx_readl(tup, TEGRA_UTC_FIFO_OCCUPANCY);
+ if (len < burst_size)
+ burst_size = len;
+
+ uart_console_write(&tup->port, outbuf, burst_size, tegra_utc_console_putchar);
+
+ outbuf += burst_size;
+ len -= burst_size;
+ }
+
+ nbcon_exit_unsafe(wctxt);
+}
+
+static void tegra_utc_console_write_thread(struct console *cons, struct nbcon_write_context *wctxt)
+{
+ struct tegra_utc_port *tup = container_of(cons, struct tegra_utc_port, console);
+ unsigned int len = READ_ONCE(wctxt->len);
+ unsigned int i;
+ u32 val;
+
+ for (i = 0; i < len; i++) {
+ if (!nbcon_enter_unsafe(wctxt))
+ break;
+
+ read_poll_timeout_atomic(tegra_utc_tx_readl, val, !(val & TEGRA_UTC_FIFO_FULL),
+ 0, USEC_PER_SEC, false, tup, TEGRA_UTC_FIFO_STATUS);
+ uart_console_write(&tup->port, wctxt->outbuf + i, 1, tegra_utc_console_putchar);
+
+ if (!nbcon_exit_unsafe(wctxt))
+ break;
+ }
+}
+
+static void tegra_utc_console_device_lock(struct console *cons, unsigned long *flags)
+{
+ struct tegra_utc_port *tup = container_of(cons, struct tegra_utc_port, console);
+ struct uart_port *port = &tup->port;
+
+ __uart_port_lock_irqsave(port, flags);
+}
+
+static void tegra_utc_console_device_unlock(struct console *cons, unsigned long flags)
+{
+ struct tegra_utc_port *tup = container_of(cons, struct tegra_utc_port, console);
+ struct uart_port *port = &tup->port;
+
+ __uart_port_unlock_irqrestore(port, flags);
+}
+
+static int tegra_utc_console_setup(struct console *cons, char *options)
+{
+ struct tegra_utc_port *tup = container_of(cons, struct tegra_utc_port, console);
+
+ tegra_utc_init_tx(tup);
+
+ return 0;
+}
+#endif
+
+static struct uart_driver tegra_utc_driver = {
+ .driver_name = "tegra-utc",
+ .dev_name = "ttyUTC",
+ .nr = TEGRA_UTC_UART_NR,
+};
+
+static int tegra_utc_setup_port(struct device *dev, struct tegra_utc_port *tup)
+{
+ tup->port.dev = dev;
+ tup->port.fifosize = tup->fifosize;
+ tup->port.flags = UPF_BOOT_AUTOCONF;
+ tup->port.iotype = UPIO_MEM;
+ tup->port.ops = &tegra_utc_uart_ops;
+ tup->port.type = PORT_TEGRA_TCU;
+ tup->port.private_data = tup;
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_UTC_CONSOLE)
+ strscpy(tup->console.name, "ttyUTC", sizeof(tup->console.name));
+ tup->console.write_atomic = tegra_utc_console_write_atomic;
+ tup->console.write_thread = tegra_utc_console_write_thread;
+ tup->console.device_lock = tegra_utc_console_device_lock;
+ tup->console.device_unlock = tegra_utc_console_device_unlock;
+ tup->console.device = uart_console_device;
+ tup->console.setup = tegra_utc_console_setup;
+ tup->console.flags = CON_PRINTBUFFER | CON_NBCON;
+ tup->console.data = &tegra_utc_driver;
+#endif
+
+ return uart_read_port_properties(&tup->port);
+}
+
+static int tegra_utc_register_port(struct tegra_utc_port *tup)
+{
+ int ret;
+
+ ret = uart_add_one_port(&tegra_utc_driver, &tup->port);
+ if (ret)
+ return ret;
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_UTC_CONSOLE)
+ register_console(&tup->console);
+#endif
+
+ return 0;
+}
+
+static int tegra_utc_probe(struct platform_device *pdev)
+{
+ const unsigned int *soc_fifosize;
+ struct device *dev = &pdev->dev;
+ struct tegra_utc_port *tup;
+ int ret;
+
+ tup = devm_kzalloc(dev, sizeof(*tup), GFP_KERNEL);
+ if (!tup)
+ return -ENOMEM;
+
+ ret = device_property_read_u32(dev, "tx-threshold", &tup->tx_threshold);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing %s property\n", "tx-threshold");
+
+ ret = device_property_read_u32(dev, "rx-threshold", &tup->rx_threshold);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing %s property\n", "rx-threshold");
+
+ soc_fifosize = device_get_match_data(dev);
+ tup->fifosize = *soc_fifosize;
+
+ tup->tx_base = devm_platform_ioremap_resource_byname(pdev, "tx");
+ if (IS_ERR(tup->tx_base))
+ return PTR_ERR(tup->tx_base);
+
+ tup->rx_base = devm_platform_ioremap_resource_byname(pdev, "rx");
+ if (IS_ERR(tup->rx_base))
+ return PTR_ERR(tup->rx_base);
+
+ ret = tegra_utc_setup_port(dev, tup);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to setup uart port\n");
+
+ platform_set_drvdata(pdev, tup);
+
+ return tegra_utc_register_port(tup);
+}
+
+static void tegra_utc_remove(struct platform_device *pdev)
+{
+ struct tegra_utc_port *tup = platform_get_drvdata(pdev);
+
+#if IS_ENABLED(CONFIG_SERIAL_TEGRA_UTC_CONSOLE)
+ unregister_console(&tup->console);
+#endif
+ uart_remove_one_port(&tegra_utc_driver, &tup->port);
+}
+
+static const unsigned int tegra264_utc_soc = 128;
+
+static const struct of_device_id tegra_utc_of_match[] = {
+ { .compatible = "nvidia,tegra264-utc", .data = &tegra264_utc_soc },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_utc_of_match);
+
+static struct platform_driver tegra_utc_platform_driver = {
+ .probe = tegra_utc_probe,
+ .remove = tegra_utc_remove,
+ .driver = {
+ .name = "tegra-utc",
+ .of_match_table = tegra_utc_of_match,
+ },
+};
+
+static int __init tegra_utc_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&tegra_utc_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&tegra_utc_platform_driver);
+ if (ret)
+ uart_unregister_driver(&tegra_utc_driver);
+
+ return ret;
+}
+module_init(tegra_utc_init);
+
+static void __exit tegra_utc_exit(void)
+{
+ platform_driver_unregister(&tegra_utc_platform_driver);
+ uart_unregister_driver(&tegra_utc_driver);
+}
+module_exit(tegra_utc_exit);
+
+MODULE_AUTHOR("Kartik Rajput <kkartik@nvidia.com>");
+MODULE_DESCRIPTION("Tegra UART Trace Controller");
+MODULE_LICENSE("GPL");
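Since tegra_utc_set_termios() above coerces any requested line settings to 8-N-1 with no flow control, a reader of one of the UTC nodes needs no special configuration. The sketch below is a hypothetical userspace consumer; the /dev/ttyUTC0 name is an assumption derived from the driver's dev_name, not something this patch creates:

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	struct termios tio;
	ssize_t n;
	int fd;

	fd = open("/dev/ttyUTC0", O_RDWR | O_NOCTTY);	/* assumed node name */
	if (fd < 0)
		return 1;

	tcgetattr(fd, &tio);
	cfmakeraw(&tio);	/* the driver forces 8-N-1 regardless */
	tcsetattr(fd, TCSANOW, &tio);

	/* Dump whatever the trace controller hands us to stdout. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}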
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 1d81eeefb068..75542333c54a 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -12,12 +12,14 @@
#include <linux/tty.h>
#include "tty.h"
+#define TTY_AUDIT_BUF_SIZE 4096
+
struct tty_audit_buf {
struct mutex mutex; /* Protects all data below */
dev_t dev; /* The TTY which the data is from */
bool icanon;
size_t valid;
- u8 *data; /* Allocated size N_TTY_BUF_SIZE */
+ u8 *data; /* Allocated size TTY_AUDIT_BUF_SIZE */
};
static struct tty_audit_buf *tty_audit_buf_ref(void)
@@ -37,7 +39,7 @@ static struct tty_audit_buf *tty_audit_buf_alloc(void)
if (!buf)
goto err;
- buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
+ buf->data = kmalloc(TTY_AUDIT_BUF_SIZE, GFP_KERNEL);
if (!buf->data)
goto err_buf;
@@ -235,14 +237,14 @@ void tty_audit_add_data(const struct tty_struct *tty, const void *data,
do {
size_t run;
- run = N_TTY_BUF_SIZE - buf->valid;
+ run = TTY_AUDIT_BUF_SIZE - buf->valid;
if (run > size)
run = size;
memcpy(buf->data + buf->valid, data, run);
buf->valid += run;
data += run;
size -= run;
- if (buf->valid == N_TTY_BUF_SIZE)
+ if (buf->valid == TTY_AUDIT_BUF_SIZE)
tty_audit_buf_push(buf);
} while (size != 0);
mutex_unlock(&buf->mutex);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 449dbd216460..ca9b7d7bad2b 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3329,10 +3329,12 @@ EXPORT_SYMBOL(tty_unregister_device);
* __tty_alloc_driver - allocate tty driver
* @lines: count of lines this driver can handle at most
* @owner: module which is responsible for this driver
- * @flags: some of %TTY_DRIVER_ flags, will be set in driver->flags
+ * @flags: some of enum tty_driver_flag, will be set in driver->flags
*
- * This should not be called directly, some of the provided macros should be
- * used instead. Use IS_ERR() and friends on @retval.
+ * This should not be called directly, tty_alloc_driver() should be used
+ * instead.
+ *
+ * Returns: struct tty_driver or a PTR-encoded error (use IS_ERR() and friends).
*/
struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
unsigned long flags)
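As the updated kernel-doc notes, drivers should go through tty_alloc_driver() rather than __tty_alloc_driver() and check the result with IS_ERR(); the kgdb_nmi.c registration deleted above followed exactly this shape. A condensed sketch, with "foo" names and an empty ops table as placeholders:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

static const struct tty_operations foo_ops = {
	/* .open, .close, .write, ... */
};

static struct tty_driver *foo_driver;

static int __init foo_tty_init(void)
{
	int ret;

	foo_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
	if (IS_ERR(foo_driver))
		return PTR_ERR(foo_driver);

	foo_driver->driver_name = "foo";
	foo_driver->name = "ttyFOO";
	foo_driver->type = TTY_DRIVER_TYPE_SERIAL;
	foo_driver->subtype = SERIAL_TYPE_NORMAL;
	foo_driver->init_termios = tty_std_termios;
	tty_set_operations(foo_driver, &foo_ops);

	ret = tty_register_driver(foo_driver);
	if (ret)
		tty_driver_kref_put(foo_driver);

	return ret;
}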
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 3be428c16260..4e18031a5ca3 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -367,23 +367,6 @@ int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
}
/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-int ldsem_down_write_trylock(struct ld_semaphore *sem)
-{
- long count = atomic_long_read(&sem->count);
-
- while ((count & LDSEM_ACTIVE_MASK) == 0) {
- if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
- rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
- lock_acquired(&sem->dep_map, _RET_IP_);
- return 1;
- }
- }
- return 0;
-}
-
-/*
* release a read lock
*/
void ldsem_up_read(struct ld_semaphore *sem)
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index fd1beb10bba7..694aa1457739 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -3468,7 +3468,7 @@ __must_hold(&cdns->lock)
return 0;
}
-static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
+static int cdns3_gadget_resume(struct cdns *cdns, bool lost_power)
{
struct cdns3_device *priv_dev = cdns->gadget_dev;
@@ -3476,7 +3476,7 @@ static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
return 0;
cdns3_gadget_config(priv_dev);
- if (hibernated)
+ if (lost_power)
writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
return 0;
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index 040bb91e9c01..302ebf6d8e53 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -58,6 +58,7 @@ struct cdns_ti {
unsigned vbus_divider:1;
struct clk *usb2_refclk;
struct clk *lpm_clk;
+ int usb2_refclk_rate_code;
};
static const int cdns_ti_rate_table[] = { /* in KHZ */
@@ -98,15 +99,50 @@ static const struct of_dev_auxdata cdns_ti_auxdata[] = {
{},
};
+static void cdns_ti_reset_and_init_hw(struct cdns_ti *data)
+{
+ u32 reg;
+
+ /* assert RESET */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ reg &= ~USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* set static config */
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+ reg &= ~USBSS1_STATIC_PLL_REF_SEL_MASK;
+ reg |= data->usb2_refclk_rate_code << USBSS1_STATIC_PLL_REF_SEL_SHIFT;
+
+ reg &= ~USBSS1_STATIC_VBUS_SEL_MASK;
+ if (data->vbus_divider)
+ reg |= 1 << USBSS1_STATIC_VBUS_SEL_SHIFT;
+
+ cdns_ti_writel(data, USBSS_STATIC_CONFIG, reg);
+ reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
+
+ /* set USB2_ONLY mode if requested */
+ reg = cdns_ti_readl(data, USBSS_W1);
+ if (data->usb2_only)
+ reg |= USBSS_W1_USB2_ONLY;
+
+ /* set default modestrap */
+ reg |= USBSS_W1_MODESTRAP_SEL;
+ reg &= ~USBSS_W1_MODESTRAP_MASK;
+ reg |= USBSS_MODESTRAP_MODE_NONE << USBSS_W1_MODESTRAP_SHIFT;
+ cdns_ti_writel(data, USBSS_W1, reg);
+
+ /* de-assert RESET */
+ reg |= USBSS_W1_PWRUP_RST;
+ cdns_ti_writel(data, USBSS_W1, reg);
+}
+
static int cdns_ti_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct cdns_ti *data;
- int error;
- u32 reg;
- int rate_code, i;
unsigned long rate;
+ int error, i;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -146,7 +182,17 @@ static int cdns_ti_probe(struct platform_device *pdev)
return -EINVAL;
}
- rate_code = i;
+ data->usb2_refclk_rate_code = i;
+ data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ data->usb2_only = device_property_read_bool(dev, "ti,usb2-only");
+
+ /*
+ * The call below to pm_runtime_get_sync() MIGHT reset hardware, if it
+ * detects it as uninitialised. We want to enforce a reset at probe,
+ * and so do it manually here. This means the first runtime_resume()
+ * will be a no-op.
+ */
+ cdns_ti_reset_and_init_hw(data);
pm_runtime_enable(dev);
error = pm_runtime_get_sync(dev);
@@ -155,40 +201,6 @@ static int cdns_ti_probe(struct platform_device *pdev)
goto err;
}
- /* assert RESET */
- reg = cdns_ti_readl(data, USBSS_W1);
- reg &= ~USBSS_W1_PWRUP_RST;
- cdns_ti_writel(data, USBSS_W1, reg);
-
- /* set static config */
- reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
- reg &= ~USBSS1_STATIC_PLL_REF_SEL_MASK;
- reg |= rate_code << USBSS1_STATIC_PLL_REF_SEL_SHIFT;
-
- reg &= ~USBSS1_STATIC_VBUS_SEL_MASK;
- data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
- if (data->vbus_divider)
- reg |= 1 << USBSS1_STATIC_VBUS_SEL_SHIFT;
-
- cdns_ti_writel(data, USBSS_STATIC_CONFIG, reg);
- reg = cdns_ti_readl(data, USBSS_STATIC_CONFIG);
-
- /* set USB2_ONLY mode if requested */
- reg = cdns_ti_readl(data, USBSS_W1);
- data->usb2_only = device_property_read_bool(dev, "ti,usb2-only");
- if (data->usb2_only)
- reg |= USBSS_W1_USB2_ONLY;
-
- /* set default modestrap */
- reg |= USBSS_W1_MODESTRAP_SEL;
- reg &= ~USBSS_W1_MODESTRAP_MASK;
- reg |= USBSS_MODESTRAP_MODE_NONE << USBSS_W1_MODESTRAP_SHIFT;
- cdns_ti_writel(data, USBSS_W1, reg);
-
- /* de-assert RESET */
- reg |= USBSS_W1_PWRUP_RST;
- cdns_ti_writel(data, USBSS_W1, reg);
-
error = of_platform_populate(node, NULL, cdns_ti_auxdata, dev);
if (error) {
dev_err(dev, "failed to create children: %d\n", error);
@@ -224,6 +236,24 @@ static void cdns_ti_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
}
+static int cdns_ti_runtime_resume(struct device *dev)
+{
+ const u32 mask = USBSS_W1_PWRUP_RST | USBSS_W1_MODESTRAP_SEL;
+ struct cdns_ti *data = dev_get_drvdata(dev);
+ u32 w1;
+
+ w1 = cdns_ti_readl(data, USBSS_W1);
+ if ((w1 & mask) != mask)
+ cdns_ti_reset_and_init_hw(data);
+
+ return 0;
+}
+
+static const struct dev_pm_ops cdns_ti_pm_ops = {
+ RUNTIME_PM_OPS(NULL, cdns_ti_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
static const struct of_device_id cdns_ti_of_match[] = {
{ .compatible = "ti,j721e-usb", },
{ .compatible = "ti,am64-usb", },
@@ -237,6 +267,7 @@ static struct platform_driver cdns_ti_driver = {
.driver = {
.name = "cdns3-ti",
.of_match_table = cdns_ti_of_match,
+ .pm = pm_ptr(&cdns_ti_pm_ops),
},
};
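The new runtime_resume callback infers context loss from two bits that the reset/init sequence always leaves set in USBSS_W1; if either reads back clear, the wrapper was powered off and cdns_ti_reset_and_init_hw() is re-run. Below is a minimal stand-alone sketch of that check; the bit positions are illustrative and may differ from the driver's own USBSS_W1_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the real macros live in cdns3-ti.c. */
#define USBSS_W1_PWRUP_RST      (1u << 31)
#define USBSS_W1_MODESTRAP_SEL  (1u << 4)

/* Mirrors cdns_ti_runtime_resume(): both bits are set by the init sequence,
 * so seeing either cleared means the register context was lost. */
static int needs_reinit(uint32_t w1)
{
	const uint32_t mask = USBSS_W1_PWRUP_RST | USBSS_W1_MODESTRAP_SEL;

	return (w1 & mask) != mask;
}

int main(void)
{
	printf("after power loss: %d\n", needs_reinit(0));
	printf("context kept:     %d\n",
	       needs_reinit(USBSS_W1_PWRUP_RST | USBSS_W1_MODESTRAP_SEL));
	return 0;
}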
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 97edf767ecee..87f310841735 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -1974,7 +1974,7 @@ static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
return 0;
}
-static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
+static int cdnsp_gadget_resume(struct cdns *cdns, bool lost_power)
{
struct cdnsp_device *pdev = cdns->gadget_dev;
enum usb_device_speed max_speed;
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 98980a23e1c2..1243a5cea91b 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -524,11 +524,12 @@ EXPORT_SYMBOL_GPL(cdns_suspend);
int cdns_resume(struct cdns *cdns)
{
+ bool power_lost = cdns_power_is_lost(cdns);
enum usb_role real_role;
bool role_changed = false;
int ret = 0;
- if (cdns_power_is_lost(cdns)) {
+ if (power_lost) {
if (!cdns->role_sw) {
real_role = cdns_hw_role_state_machine(cdns);
if (real_role != cdns->role) {
@@ -551,7 +552,7 @@ int cdns_resume(struct cdns *cdns)
}
if (cdns->roles[cdns->role]->resume)
- cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns));
+ cdns->roles[cdns->role]->resume(cdns, power_lost);
return 0;
}
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 57d47348dc19..921cccf1ca9d 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -30,7 +30,7 @@ struct cdns_role_driver {
int (*start)(struct cdns *cdns);
void (*stop)(struct cdns *cdns);
int (*suspend)(struct cdns *cdns, bool do_wakeup);
- int (*resume)(struct cdns *cdns, bool hibernated);
+ int (*resume)(struct cdns *cdns, bool lost_power);
const char *name;
#define CDNS_ROLE_STATE_INACTIVE 0
#define CDNS_ROLE_STATE_ACTIVE 1
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 7ba760ee62e3..f0df114c2b53 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -138,6 +138,16 @@ static void cdns_host_exit(struct cdns *cdns)
cdns_drd_host_off(cdns);
}
+static int cdns_host_resume(struct cdns *cdns, bool power_lost)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(cdns->host_dev);
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ priv->power_lost = power_lost;
+
+ return 0;
+}
+
int cdns_host_init(struct cdns *cdns)
{
struct cdns_role_driver *rdrv;
@@ -148,6 +158,7 @@ int cdns_host_init(struct cdns *cdns)
rdrv->start = __cdns_host_init;
rdrv->stop = cdns_host_exit;
+ rdrv->resume = cdns_host_resume;
rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->name = "host";
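The role resume callback now receives a single power_lost decision computed once in cdns_resume(), and the host role simply stashes it in xhci_plat_priv so the later xhci_resume() can re-initialize instead of resuming. A minimal model of that hand-off follows; every structure and function name in it is illustrative rather than the driver's API.

#include <stdbool.h>
#include <stdio.h>

struct xhci_priv { bool power_lost; };

struct role {
	int (*resume)(struct xhci_priv *priv, bool lost_power);
};

static int host_resume(struct xhci_priv *priv, bool lost_power)
{
	priv->power_lost = lost_power;	/* consumed later by the xHCI resume path */
	return 0;
}

int main(void)
{
	struct xhci_priv priv = { 0 };
	struct role host = { .resume = host_resume };
	bool power_lost = true;		/* e.g. the wrapper detected lost power */

	host.resume(&priv, power_lost);
	printf("xhci will %sre-init\n", priv.power_lost ? "" : "not ");
	return 0;
}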
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 1394881fde5f..6243d8005f5d 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -440,7 +440,7 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
else if (data->oc_pol_configured)
reg &= ~MX6_BM_OVER_CUR_POLARITY;
}
- /* If the polarity is not set keep it as setup by the bootlader */
+ /* If the polarity is not set keep it as setup by the bootloader */
if (data->pwr_pol == 1)
reg |= MX6_BM_PWR_POLARITY;
writel(reg, usbmisc->base + data->index * 4);
@@ -645,7 +645,7 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
else if (data->oc_pol_configured)
reg &= ~MX6_BM_OVER_CUR_POLARITY;
}
- /* If the polarity is not set keep it as setup by the bootlader */
+ /* If the polarity is not set keep it as setup by the bootloader */
if (data->pwr_pol == 1)
reg |= MX6_BM_PWR_POLARITY;
writel(reg, usbmisc->base);
@@ -939,7 +939,7 @@ static int usbmisc_imx7ulp_init(struct imx_usbmisc_data *data)
else if (data->oc_pol_configured)
reg &= ~MX6_BM_OVER_CUR_POLARITY;
}
- /* If the polarity is not set keep it as setup by the bootlader */
+ /* If the polarity is not set keep it as setup by the bootloader */
if (data->pwr_pol == 1)
reg |= MX6_BM_PWR_POLARITY;
@@ -1185,7 +1185,7 @@ int imx_usbmisc_suspend(struct imx_usbmisc_data *data, bool wakeup)
if (usbmisc->ops->hsic_set_clk && data->hsic)
ret = usbmisc->ops->hsic_set_clk(data, false);
if (ret) {
- dev_err(data->dev, "set_wakeup failed, ret=%d\n", ret);
+ dev_err(data->dev, "hsic_set_clk failed, ret=%d\n", ret);
return ret;
}
@@ -1224,7 +1224,7 @@ int imx_usbmisc_resume(struct imx_usbmisc_data *data, bool wakeup)
if (usbmisc->ops->hsic_set_clk && data->hsic)
ret = usbmisc->ops->hsic_set_clk(data, true);
if (ret) {
- dev_err(data->dev, "set_wakeup failed, ret=%d\n", ret);
+ dev_err(data->dev, "hsic_set_clk failed, ret=%d\n", ret);
goto hsic_set_clk_fail;
}
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index aa710b50791b..1e36be2a28fd 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -158,7 +158,7 @@ static int usb_conn_psy_register(struct usb_conn_info *info)
struct device *dev = info->dev;
struct power_supply_desc *desc = &info->desc;
struct power_supply_config cfg = {
- .of_node = dev->of_node,
+ .fwnode = dev_fwnode(dev),
};
desc->name = "usb-charger";
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index f7bf8d1de3ad..13bd4ec4ea5f 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -64,6 +64,37 @@ static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev,
memcpy(&ep->ssp_isoc_ep_comp, desc, USB_DT_SSP_ISOC_EP_COMP_SIZE);
}
+static void usb_parse_eusb2_isoc_endpoint_companion(struct device *ddev,
+ int cfgno, int inum, int asnum, struct usb_host_endpoint *ep,
+ unsigned char *buffer, int size)
+{
+ struct usb_eusb2_isoc_ep_comp_descriptor *desc;
+ struct usb_descriptor_header *h;
+
+ /*
+ * eUSB2 isochronous endpoint companion descriptor for this endpoint
+ * shall be declared before the next endpoint or interface descriptor
+ */
+ while (size >= USB_DT_EUSB2_ISOC_EP_COMP_SIZE) {
+ h = (struct usb_descriptor_header *)buffer;
+
+ if (h->bDescriptorType == USB_DT_EUSB2_ISOC_ENDPOINT_COMP) {
+ desc = (struct usb_eusb2_isoc_ep_comp_descriptor *)buffer;
+ ep->eusb2_isoc_ep_comp = *desc;
+ return;
+ }
+ if (h->bDescriptorType == USB_DT_ENDPOINT ||
+ h->bDescriptorType == USB_DT_INTERFACE)
+ break;
+
+ buffer += h->bLength;
+ size -= h->bLength;
+ }
+
+ dev_notice(ddev, "No eUSB2 isoc ep %d companion for config %d interface %d altsetting %d\n",
+ ep->desc.bEndpointAddress, cfgno, inum, asnum);
+}
+
static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
int inum, int asnum, struct usb_host_endpoint *ep,
unsigned char *buffer, int size)
@@ -258,8 +289,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
int n, i, j, retval;
unsigned int maxp;
const unsigned short *maxpacket_maxes;
+ u16 bcdUSB;
d = (struct usb_endpoint_descriptor *) buffer;
+ bcdUSB = le16_to_cpu(udev->descriptor.bcdUSB);
buffer += d->bLength;
size -= d->bLength;
@@ -409,15 +442,17 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
/*
* Validate the wMaxPacketSize field.
- * Some devices have isochronous endpoints in altsetting 0;
- * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
- * (see the end of section 5.6.3), so don't warn about them.
+ * eUSB2 devices (see USB 2.0 Double Isochronous IN ECN 9.6.6 Endpoint)
+ * and devices with isochronous endpoints in altsetting 0 (see USB 2.0
+ * end of section 5.6.3) have wMaxPacketSize = 0.
+ * So don't warn about those.
*/
maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
- if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
+
+ if (maxp == 0 && bcdUSB != 0x0220 &&
+ !(usb_endpoint_xfer_isoc(d) && asnum == 0))
dev_notice(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
- }
/* Find the highest legal maxpacket size for this endpoint */
i = 0; /* additional transactions per microframe */
@@ -465,6 +500,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
maxp);
}
+ /* Parse a possible eUSB2 periodic endpoint companion descriptor */
+ if (bcdUSB == 0x0220 && d->wMaxPacketSize == 0 &&
+ (usb_endpoint_xfer_isoc(d) || usb_endpoint_xfer_int(d)))
+ usb_parse_eusb2_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
+ endpoint, buffer, size);
+
/* Parse a possible SuperSpeed endpoint companion descriptor */
if (udev->speed >= USB_SPEED_SUPER)
usb_parse_ss_endpoint_companion(ddev, cfgno,
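usb_parse_eusb2_isoc_endpoint_companion() scans forward through the remaining configuration buffer and stops as soon as it reaches the next endpoint or interface descriptor, so a companion belonging to a different endpoint can never be picked up. A user-space sketch of the same walk is shown below, using an assumed descriptor-type value and a simplified header struct; the kernel itself uses usb_descriptor_header and its USB_DT_* constants.

#include <stdint.h>
#include <stdio.h>

#define DT_INTERFACE          0x04
#define DT_ENDPOINT           0x05
#define DT_EUSB2_ISOC_EP_COMP 0x12	/* assumed value for the sketch */

struct desc_header {
	uint8_t bLength;
	uint8_t bDescriptorType;
};

/* Same stop conditions as the kernel parser: return the companion if it
 * appears before the next endpoint/interface descriptor, else NULL. */
static const uint8_t *find_eusb2_companion(const uint8_t *buf, int size)
{
	while (size >= (int)sizeof(struct desc_header)) {
		const struct desc_header *h = (const void *)buf;

		if (h->bLength < sizeof(*h))
			break;		/* malformed descriptor, stop walking */
		if (h->bDescriptorType == DT_EUSB2_ISOC_EP_COMP)
			return buf;
		if (h->bDescriptorType == DT_ENDPOINT ||
		    h->bDescriptorType == DT_INTERFACE)
			break;
		buf += h->bLength;
		size -= h->bLength;
	}
	return NULL;
}

int main(void)
{
	/* a class-specific descriptor first, then the companion */
	const uint8_t buf[] = { 3, 0x25, 0x00, 4, DT_EUSB2_ISOC_EP_COMP, 0x00, 0x04 };

	printf("companion %sfound\n",
	       find_eusb2_companion(buf, sizeof(buf)) ? "" : "not ");
	return 0;
}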
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index a75cf1f6d741..46026b331267 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1609,7 +1609,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
if (retval == 0)
retval = -EINPROGRESS;
else if (retval != -EIDRM && retval != -EBUSY)
- dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n",
+ dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
urb, retval);
usb_put_dev(udev);
}
@@ -1786,7 +1786,7 @@ rescan:
/* kick hcd */
unlink1(hcd, urb, -ESHUTDOWN);
dev_dbg (hcd->self.controller,
- "shutdown urb %pK ep%d%s-%s\n",
+ "shutdown urb %p ep%d%s-%s\n",
urb, usb_endpoint_num(&ep->desc),
is_in ? "in" : "out",
usb_ep_type_string(usb_endpoint_type(&ep->desc)));
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index dcba4281ea48..8c7f9cc785bb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4708,8 +4708,6 @@ void usb_ep0_reinit(struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
-#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
-
static int hub_set_address(struct usb_device *udev, int devnum)
{
int retval;
@@ -4733,7 +4731,7 @@ static int hub_set_address(struct usb_device *udev, int devnum)
if (hcd->driver->address_device)
retval = hcd->driver->address_device(hcd, udev, timeout_ms);
else
- retval = usb_control_msg(udev, usb_sndaddr0pipe(),
+ retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_SET_ADDRESS, 0, devnum, 0,
NULL, 0, timeout_ms);
if (retval == 0) {
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 7576920e2d5a..5e52a35486af 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -376,7 +376,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if (!urb || !urb->complete)
return -EINVAL;
if (urb->hcpriv) {
- WARN_ONCE(1, "URB %pK submitted while active\n", urb);
+ WARN_ONCE(1, "URB %p submitted while active\n", urb);
return -EBUSY;
}
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 9919ab725d54..c3d24312db0f 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -43,6 +43,7 @@ int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
/* Backup global regs */
gr = &hsotg->gr_backup;
+ gr->gintsts = dwc2_readl(hsotg, GINTSTS);
gr->gotgctl = dwc2_readl(hsotg, GOTGCTL);
gr->gintmsk = dwc2_readl(hsotg, GINTMSK);
gr->gahbcfg = dwc2_readl(hsotg, GAHBCFG);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 2bd74f3033ed..34127b890b2a 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -667,6 +667,7 @@ struct dwc2_hw_params {
/**
* struct dwc2_gregs_backup - Holds global registers state before
* entering partial power down
+ * @gintsts: Backup of GINTSTS register
* @gotgctl: Backup of GOTGCTL register
* @gintmsk: Backup of GINTMSK register
* @gahbcfg: Backup of GAHBCFG register
@@ -683,6 +684,7 @@ struct dwc2_hw_params {
* @valid: True if registers values backuped.
*/
struct dwc2_gregs_backup {
+ u32 gintsts;
u32 gotgctl;
u32 gintmsk;
u32 gahbcfg;
@@ -1127,6 +1129,9 @@ struct dwc2_hsotg {
#define DWC2_FS_IOT_ID 0x55310000
#define DWC2_HS_IOT_ID 0x55320000
+#define DWC2_RESTORE_DCTL BIT(0)
+#define DWC2_RESTORE_DCFG BIT(1)
+
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
union dwc2_hcd_internal_flags {
u32 d32;
@@ -1420,7 +1425,7 @@ int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
#define dwc2_is_device_connected(hsotg) (hsotg->connected)
#define dwc2_is_device_enabled(hsotg) (hsotg->enabled)
int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
-int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup);
+int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, unsigned int flags);
int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg);
int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
int rem_wakeup, int reset);
@@ -1435,6 +1440,9 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg);
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg);
void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg);
void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg);
+int dwc2_gadget_backup_critical_registers(struct dwc2_hsotg *hsotg);
+int dwc2_gadget_restore_critical_registers(struct dwc2_hsotg *hsotg,
+ unsigned int flags);
static inline void dwc2_clear_fifo_map(struct dwc2_hsotg *hsotg)
{ hsotg->fifo_map = 0; }
#else
@@ -1459,7 +1467,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg,
- int remote_wakeup)
+ unsigned int flags)
{ return 0; }
static inline int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
{ return 0; }
@@ -1482,6 +1490,11 @@ static inline int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg) {}
static inline void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg) {}
+static inline int dwc2_gadget_backup_critical_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_gadget_restore_critical_registers(struct dwc2_hsotg *hsotg,
+ unsigned int flags)
+{ return 0; }
static inline void dwc2_clear_fifo_map(struct dwc2_hsotg *hsotg) {}
#endif
@@ -1505,6 +1518,8 @@ int dwc2_host_exit_partial_power_down(struct dwc2_hsotg *hsotg,
void dwc2_host_enter_clock_gating(struct dwc2_hsotg *hsotg);
void dwc2_host_exit_clock_gating(struct dwc2_hsotg *hsotg, int rem_wakeup);
bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg *dwc2);
+int dwc2_host_backup_critical_registers(struct dwc2_hsotg *hsotg);
+int dwc2_host_restore_critical_registers(struct dwc2_hsotg *hsotg);
static inline void dwc2_host_schedule_phy_reset(struct dwc2_hsotg *hsotg)
{ schedule_work(&hsotg->phy_reset_work); }
#else
@@ -1544,6 +1559,10 @@ static inline void dwc2_host_exit_clock_gating(struct dwc2_hsotg *hsotg,
int rem_wakeup) {}
static inline bool dwc2_host_can_poweroff_phy(struct dwc2_hsotg *dwc2)
{ return false; }
+static inline int dwc2_host_backup_critical_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_host_restore_critical_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
static inline void dwc2_host_schedule_phy_reset(struct dwc2_hsotg *hsotg) {}
#endif
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index bd4c788f03bc..300ea4969f0c 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -5204,11 +5204,11 @@ int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
* if controller power were disabled.
*
* @hsotg: Programming view of the DWC_otg controller
- * @remote_wakeup: Indicates whether resume is initiated by Device or Host.
+ * @flags: Defines which registers should be restored.
*
* Return: 0 if successful, negative error code otherwise
*/
-int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
+int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, unsigned int flags)
{
struct dwc2_dregs_backup *dr;
int i;
@@ -5224,7 +5224,10 @@ int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
}
dr->valid = false;
- if (!remote_wakeup)
+ if (flags & DWC2_RESTORE_DCFG)
+ dwc2_writel(hsotg, dr->dcfg, DCFG);
+
+ if (flags & DWC2_RESTORE_DCTL)
dwc2_writel(hsotg, dr->dctl, DCTL);
dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
@@ -5310,6 +5313,49 @@ void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
}
+int dwc2_gadget_backup_critical_registers(struct dwc2_hsotg *hsotg)
+{
+ int ret;
+
+ /* Backup all registers */
+ ret = dwc2_backup_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup global registers\n",
+ __func__);
+ return ret;
+ }
+
+ ret = dwc2_backup_device_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup device registers\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dwc2_gadget_restore_critical_registers(struct dwc2_hsotg *hsotg,
+ unsigned int flags)
+{
+ int ret;
+
+ ret = dwc2_restore_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore registers\n",
+ __func__);
+ return ret;
+ }
+ ret = dwc2_restore_device_registers(hsotg, flags);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore device registers\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
*
@@ -5327,18 +5373,9 @@ int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
/* Change to L2(suspend) state */
hsotg->lx_state = DWC2_L2;
dev_dbg(hsotg->dev, "Start of hibernation completed\n");
- ret = dwc2_backup_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup global registers\n",
- __func__);
- return ret;
- }
- ret = dwc2_backup_device_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup device registers\n",
- __func__);
+ ret = dwc2_gadget_backup_critical_registers(hsotg);
+ if (ret)
return ret;
- }
gpwrdn = GPWRDN_PWRDNRSTN;
udelay(10);
@@ -5415,6 +5452,7 @@ int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
u32 gpwrdn;
u32 dctl;
int ret = 0;
+ unsigned int flags = 0;
struct dwc2_gregs_backup *gr;
struct dwc2_dregs_backup *dr;
@@ -5477,6 +5515,7 @@ int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_PWRONPRGDONE;
dwc2_writel(hsotg, dctl, DCTL);
+ flags |= DWC2_RESTORE_DCTL;
}
/* Wait for interrupts which must be cleared */
mdelay(2);
@@ -5484,20 +5523,9 @@ int dwc2_gadget_exit_hibernation(struct dwc2_hsotg *hsotg,
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Restore global registers */
- ret = dwc2_restore_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore registers\n",
- __func__);
- return ret;
- }
-
- /* Restore device registers */
- ret = dwc2_restore_device_registers(hsotg, rem_wakeup);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore device registers\n",
- __func__);
+ ret = dwc2_gadget_restore_critical_registers(hsotg, flags);
+ if (ret)
return ret;
- }
if (rem_wakeup) {
mdelay(10);
@@ -5531,19 +5559,9 @@ int dwc2_gadget_enter_partial_power_down(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "Entering device partial power down started.\n");
/* Backup all registers */
- ret = dwc2_backup_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup global registers\n",
- __func__);
- return ret;
- }
-
- ret = dwc2_backup_device_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup device registers\n",
- __func__);
+ ret = dwc2_gadget_backup_critical_registers(hsotg);
+ if (ret)
return ret;
- }
/*
* Clear any pending interrupts since dwc2 will not be able to
@@ -5590,11 +5608,8 @@ int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
{
u32 pcgcctl;
u32 dctl;
- struct dwc2_dregs_backup *dr;
int ret = 0;
- dr = &hsotg->dr_backup;
-
dev_dbg(hsotg->dev, "Exiting device partial Power Down started.\n");
pcgcctl = dwc2_readl(hsotg, PCGCTL);
@@ -5611,21 +5626,10 @@ int dwc2_gadget_exit_partial_power_down(struct dwc2_hsotg *hsotg,
udelay(100);
if (restore) {
- ret = dwc2_restore_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore registers\n",
- __func__);
- return ret;
- }
- /* Restore DCFG */
- dwc2_writel(hsotg, dr->dcfg, DCFG);
-
- ret = dwc2_restore_device_registers(hsotg, 0);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore device registers\n",
- __func__);
+ ret = dwc2_gadget_restore_critical_registers(hsotg, DWC2_RESTORE_DCTL |
+ DWC2_RESTORE_DCFG);
+ if (ret)
return ret;
- }
}
/* Set the Power-On Programming done bit */
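The old boolean remote_wakeup argument is replaced by a flags mask so each exit path can state exactly which of DCTL and DCFG should be rewritten: the hibernation-exit path only requests DCTL on the branch that also programs DCTL_PWRONPRGDONE, while partial-power-down exit asks for both. A tiny sketch of the flag handling follows, with the register writes stubbed out as prints.

#include <stdio.h>

#define DWC2_RESTORE_DCTL  (1u << 0)
#define DWC2_RESTORE_DCFG  (1u << 1)

/* Mirrors the reworked dwc2_restore_device_registers(): the caller's flags
 * decide whether DCFG and/or DCTL come back from the backup; the remaining
 * device registers are always restored. */
static void restore_device_registers(unsigned int flags)
{
	if (flags & DWC2_RESTORE_DCFG)
		printf("write DCFG from backup\n");
	if (flags & DWC2_RESTORE_DCTL)
		printf("write DCTL from backup\n");
	printf("write DAINTMSK and the per-EP registers from backup\n");
}

int main(void)
{
	/* partial power down exit restores both */
	restore_device_registers(DWC2_RESTORE_DCTL | DWC2_RESTORE_DCFG);
	return 0;
}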
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 8c3941ecaaf5..869245238d6c 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -5474,6 +5474,49 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
return 0;
}
+int dwc2_host_backup_critical_registers(struct dwc2_hsotg *hsotg)
+{
+ int ret;
+
+ /* Backup all registers */
+ ret = dwc2_backup_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup global registers\n",
+ __func__);
+ return ret;
+ }
+
+ ret = dwc2_backup_host_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to backup host registers\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dwc2_host_restore_critical_registers(struct dwc2_hsotg *hsotg)
+{
+ int ret;
+
+ ret = dwc2_restore_global_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore registers\n",
+ __func__);
+ return ret;
+ }
+
+ ret = dwc2_restore_host_registers(hsotg);
+ if (ret) {
+ dev_err(hsotg->dev, "%s: failed to restore host registers\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* dwc2_host_enter_hibernation() - Put controller in Hibernation.
*
@@ -5489,18 +5532,9 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
u32 gpwrdn;
dev_dbg(hsotg->dev, "Preparing host for hibernation\n");
- ret = dwc2_backup_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup global registers\n",
- __func__);
- return ret;
- }
- ret = dwc2_backup_host_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup host registers\n",
- __func__);
+ ret = dwc2_host_backup_critical_registers(hsotg);
+ if (ret)
return ret;
- }
/* Enter USB Suspend Mode */
hprt0 = dwc2_readl(hsotg, HPRT0);
@@ -5694,20 +5728,9 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
dwc2_writel(hsotg, 0xffffffff, GINTSTS);
/* Restore global registers */
- ret = dwc2_restore_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore registers\n",
- __func__);
+ ret = dwc2_host_restore_critical_registers(hsotg);
+ if (ret)
return ret;
- }
-
- /* Restore host registers */
- ret = dwc2_restore_host_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore host registers\n",
- __func__);
- return ret;
- }
if (rem_wakeup) {
dwc2_hcd_rem_wakeup(hsotg);
@@ -5774,19 +5797,9 @@ int dwc2_host_enter_partial_power_down(struct dwc2_hsotg *hsotg)
dev_warn(hsotg->dev, "Suspend wasn't generated\n");
/* Backup all registers */
- ret = dwc2_backup_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup global registers\n",
- __func__);
- return ret;
- }
-
- ret = dwc2_backup_host_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to backup host registers\n",
- __func__);
+ ret = dwc2_host_backup_critical_registers(hsotg);
+ if (ret)
return ret;
- }
/*
* Clear any pending interrupts since dwc2 will not be able to
@@ -5855,19 +5868,9 @@ int dwc2_host_exit_partial_power_down(struct dwc2_hsotg *hsotg,
udelay(100);
if (restore) {
- ret = dwc2_restore_global_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore registers\n",
- __func__);
- return ret;
- }
-
- ret = dwc2_restore_host_registers(hsotg);
- if (ret) {
- dev_err(hsotg->dev, "%s: failed to restore host registers\n",
- __func__);
+ ret = dwc2_host_restore_critical_registers(hsotg);
+ if (ret)
return ret;
- }
}
/* Drive resume signaling and exit suspend mode on the port. */
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 91c80a92d9b8..12b4dc07d08a 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -685,6 +685,14 @@ static int __maybe_unused dwc2_suspend(struct device *dev)
regulator_disable(dwc2->usb33d);
}
+ if (is_device_mode)
+ ret = dwc2_gadget_backup_critical_registers(dwc2);
+ else
+ ret = dwc2_host_backup_critical_registers(dwc2);
+
+ if (ret)
+ return ret;
+
if (dwc2->ll_hw_enabled &&
(is_device_mode || dwc2_host_can_poweroff_phy(dwc2))) {
ret = __dwc2_lowlevel_hw_disable(dwc2);
@@ -694,6 +702,24 @@ static int __maybe_unused dwc2_suspend(struct device *dev)
return ret;
}
+static int dwc2_restore_critical_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_gregs_backup *gr;
+
+ gr = &hsotg->gr_backup;
+
+ if (!gr->valid) {
+ dev_err(hsotg->dev, "No valid register backup, failed to restore\n");
+ return -EINVAL;
+ }
+
+ if (gr->gintsts & GINTSTS_CURMODE_HOST)
+ return dwc2_host_restore_critical_registers(hsotg);
+
+ return dwc2_gadget_restore_critical_registers(hsotg, DWC2_RESTORE_DCTL |
+ DWC2_RESTORE_DCFG);
+}
+
static int __maybe_unused dwc2_resume(struct device *dev)
{
struct dwc2_hsotg *dwc2 = dev_get_drvdata(dev);
@@ -706,6 +732,18 @@ static int __maybe_unused dwc2_resume(struct device *dev)
}
dwc2->phy_off_for_suspend = false;
+ /*
+ * During suspend it's possible that the power domain for the
+ * DWC2 controller is disabled and all register values get lost.
+	 * If the GUSBCFG register reads back as uninitialized, the register
+	 * context was lost and must be restored.
+ */
+ if (!(dwc2_readl(dwc2, GUSBCFG) & GUSBCFG_TOUTCAL_MASK)) {
+ ret = dwc2_restore_critical_registers(dwc2);
+ if (ret)
+ return ret;
+ }
+
if (dwc2->params.activate_stm_id_vb_detection) {
unsigned long flags;
u32 ggpio, gotgctl;
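The resume path first uses the GUSBCFG TOUTCAL field as a cheap "did the power domain survive" probe, then picks the host or device restore routine based on the current-mode bit captured in the GINTSTS backup taken at suspend time (which is why GINTSTS was added to dwc2_gregs_backup above). A stand-alone sketch of that decision follows; the bit positions are restated here as assumptions, the real masks live in the dwc2 register header.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GUSBCFG_TOUTCAL_MASK (0x7 << 0)	/* assumed field position */
#define GINTSTS_CURMODE_HOST (1u << 0)	/* assumed bit position */

struct gregs_backup {
	bool valid;
	uint32_t gintsts;
};

/* Mirrors the logic in dwc2_resume()/dwc2_restore_critical_registers():
 * if GUSBCFG looks uninitialised, restore host or device registers
 * depending on the mode recorded at suspend time. */
static const char *resume_action(uint32_t gusbcfg, const struct gregs_backup *gr)
{
	if (gusbcfg & GUSBCFG_TOUTCAL_MASK)
		return "context kept, nothing to restore";
	if (!gr->valid)
		return "error: no valid backup";
	return (gr->gintsts & GINTSTS_CURMODE_HOST) ?
	       "restore host registers" : "restore device registers";
}

int main(void)
{
	struct gregs_backup gr = { .valid = true, .gintsts = GINTSTS_CURMODE_HOST };

	printf("%s\n", resume_action(0x0, &gr));	/* power lost */
	printf("%s\n", resume_action(0x7, &gr));	/* context survived */
	return 0;
}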
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index c158364bc03e..9db8f3ca493d 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -153,11 +153,11 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
{
struct device *dev = am62->dev;
struct device_node *node = dev->of_node;
- struct of_phandle_args args;
struct regmap *syscon;
int ret;
- syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-phy-pll-refclk");
+ syscon = syscon_regmap_lookup_by_phandle_args(node, "ti,syscon-phy-pll-refclk",
+ 1, &am62->offset);
if (IS_ERR(syscon)) {
dev_err(dev, "unable to get ti,syscon-phy-pll-refclk regmap\n");
return PTR_ERR(syscon);
@@ -165,14 +165,6 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
am62->syscon = syscon;
- ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1,
- 0, &args);
- if (ret)
- return ret;
-
- of_node_put(args.np);
- am62->offset = args.args[0];
-
/* Core voltage. PHY_CORE_VOLTAGE bit Recommended to be 0 always */
ret = regmap_update_bits(am62->syscon, am62->offset, PHY_CORE_VOLTAGE_MASK, 0);
if (ret) {
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index f5d963fae9e0..de686b9e6404 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -163,6 +163,12 @@ static const struct dwc3_exynos_driverdata exynos7_drvdata = {
.suspend_clk_idx = 1,
};
+static const struct dwc3_exynos_driverdata exynos7870_drvdata = {
+ .clk_names = { "bus_early", "ref", "ctrl" },
+ .num_clks = 3,
+ .suspend_clk_idx = -1,
+};
+
static const struct dwc3_exynos_driverdata exynos850_drvdata = {
.clk_names = { "bus_early", "ref" },
.num_clks = 2,
@@ -186,6 +192,9 @@ static const struct of_device_id exynos_dwc3_match[] = {
.compatible = "samsung,exynos7-dwusb3",
.data = &exynos7_drvdata,
}, {
+ .compatible = "samsung,exynos7870-dwusb3",
+ .data = &exynos7870_drvdata,
+ }, {
.compatible = "samsung,exynos850-dwusb3",
.data = &exynos850_drvdata,
}, {
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 052852f80146..54a4ee2b90b7 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -148,11 +148,21 @@ static const struct property_entry dwc3_pci_intel_byt_properties[] = {
{}
};
+/*
+ * Intel Merrifield SoC uses these endpoints for tracing and they cannot
+ * be re-allocated if being used because the side band flow control signals
+ * are hard wired to certain endpoints:
+ * - 1 High BW Bulk IN (IN#1) (RTIT)
+ * - 1 1KB BW Bulk IN (IN#8) + 1 1KB BW Bulk OUT (Run Control) (OUT#8)
+ */
+static const u8 dwc3_pci_mrfld_reserved_endpoints[] = { 3, 16, 17 };
+
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_U8_ARRAY("snps,reserved-endpoints", dwc3_pci_mrfld_reserved_endpoints),
PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index ef7c43008946..5d513decaacd 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -225,7 +225,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
dwc3_data->syscfg_reg_off = res->start;
- dev_vdbg(dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
+ dev_vdbg(dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
struct device_node *child __free(device_node) = of_get_compatible_child(node,
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 89a4dc8ebf94..47e73c4ed62d 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -547,6 +547,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
{
struct dwc3_gadget_ep_cmd_params params;
+ struct dwc3_ep *dep;
u32 cmd;
int i;
int ret;
@@ -563,8 +564,13 @@ int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
return ret;
/* Reset resource allocation flags */
- for (i = resource_index; i < dwc->num_eps && dwc->eps[i]; i++)
- dwc->eps[i]->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
+ for (i = resource_index; i < dwc->num_eps; i++) {
+ dep = dwc->eps[i];
+ if (!dep)
+ continue;
+
+ dep->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
+ }
return 0;
}
@@ -751,9 +757,11 @@ void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
dwc->last_fifo_depth = fifo_depth;
/* Clear existing TXFIFO for all IN eps except ep0 */
- for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
- num += 2) {
+ for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM); num += 2) {
dep = dwc->eps[num];
+ if (!dep)
+ continue;
+
/* Don't change TXFRAMNUM on usb31 version */
size = DWC3_IP_IS(DWC3) ? 0 :
dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
@@ -1971,12 +1979,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
return -ESHUTDOWN;
}
- if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
+ if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
&req->request, req->dep->name))
return -EINVAL;
if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
- "%s: request %pK already in flight\n",
+ "%s: request %p already in flight\n",
dep->name, &req->request))
return -EINVAL;
@@ -2165,7 +2173,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
}
}
- dev_err(dwc->dev, "request %pK was not queued to %s\n",
+ dev_err(dwc->dev, "request %p was not queued to %s\n",
request, ep->name);
ret = -EINVAL;
out:
@@ -3429,14 +3437,53 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
return 0;
}
+static int dwc3_gadget_get_reserved_endpoints(struct dwc3 *dwc, const char *propname,
+ u8 *eps, u8 num)
+{
+ u8 count;
+ int ret;
+
+ if (!device_property_present(dwc->dev, propname))
+ return 0;
+
+ ret = device_property_count_u8(dwc->dev, propname);
+ if (ret < 0)
+ return ret;
+ count = ret;
+
+ ret = device_property_read_u8_array(dwc->dev, propname, eps, min(num, count));
+ if (ret)
+ return ret;
+
+ return count;
+}
+
static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
{
+ const char *propname = "snps,reserved-endpoints";
u8 epnum;
+ u8 reserved_eps[DWC3_ENDPOINTS_NUM];
+ u8 count;
+ u8 num;
+ int ret;
INIT_LIST_HEAD(&dwc->gadget->ep_list);
+ ret = dwc3_gadget_get_reserved_endpoints(dwc, propname,
+ reserved_eps, ARRAY_SIZE(reserved_eps));
+ if (ret < 0) {
+ dev_err(dwc->dev, "failed to read %s\n", propname);
+ return ret;
+ }
+ count = ret;
+
for (epnum = 0; epnum < total; epnum++) {
- int ret;
+ for (num = 0; num < count; num++) {
+ if (epnum == reserved_eps[num])
+ break;
+ }
+ if (num < count)
+ continue;
ret = dwc3_gadget_init_endpoint(dwc, epnum);
if (ret)
@@ -3703,6 +3750,8 @@ out:
for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
dep = dwc->eps[i];
+ if (!dep)
+ continue;
if (!(dep->flags & DWC3_EP_ENABLED))
continue;
@@ -3852,6 +3901,10 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
u8 epnum = event->endpoint_number;
dep = dwc->eps[epnum];
+ if (!dep) {
+ dev_warn(dwc->dev, "spurious event, endpoint %u is not allocated\n", epnum);
+ return;
+ }
if (!(dep->flags & DWC3_EP_ENABLED)) {
if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
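dwc3_gadget_init_endpoints() now skips every endpoint number listed in "snps,reserved-endpoints" (3, 16 and 17 on Merrifield), leaving the corresponding dwc->eps[] slots NULL, which is why the surrounding hunks add NULL checks before touching eps[i]. A compact stand-alone version of the skip test is sketched below; the helper name and the hard-coded list are illustrative only, the driver reads the list via device_property_read_u8_array().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the property payload */
static const uint8_t reserved_eps[] = { 3, 16, 17 };

static int ep_is_reserved(uint8_t epnum)
{
	for (size_t i = 0; i < sizeof(reserved_eps); i++)
		if (reserved_eps[i] == epnum)
			return 1;
	return 0;
}

int main(void)
{
	for (uint8_t epnum = 0; epnum < 32; epnum++) {
		if (ep_is_reserved(epnum))
			continue;	/* dwc3 leaves dwc->eps[epnum] NULL */
		/* dwc3_gadget_init_endpoint(dwc, epnum) would run here */
	}
	printf("ep 3 reserved: %d, ep 4 reserved: %d\n",
	       ep_is_reserved(3), ep_is_reserved(4));
	return 0;
}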
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 5eaeae3e2441..9a1bbd79ff5a 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -122,8 +122,6 @@ static const struct vb2_ops uvc_queue_qops = {
.queue_setup = uvc_queue_setup,
.buf_prepare = uvc_buffer_prepare,
.buf_queue = uvc_buffer_queue,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
};
int uvcg_queue_init(struct uvc_video_queue *queue, struct device *dev, enum v4l2_buf_type type,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
index 573109ca5b79..a09f72772e6e 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
@@ -548,6 +548,9 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx)
d->vhub = vhub;
d->index = idx;
d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx+1);
+ if (!d->name)
+ return -ENOMEM;
+
d->regs = vhub->regs + 0x100 + 0x10 * idx;
ast_vhub_init_ep0(vhub, &d->ep0, d);
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 0881fdd1823e..dcf31a592f5d 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1946,6 +1946,12 @@ max3421_remove(struct spi_device *spi)
usb_put_hcd(hcd);
}
+static const struct spi_device_id max3421_spi_ids[] = {
+ { "max3421" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, max3421_spi_ids);
+
static const struct of_device_id max3421_of_match_table[] = {
{ .compatible = "maxim,max3421", },
{},
@@ -1955,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, max3421_of_match_table);
static struct spi_driver max3421_driver = {
.probe = max3421_probe,
.remove = max3421_remove,
+ .id_table = max3421_spi_ids,
.driver = {
.name = "max3421-hcd",
.of_match_table = max3421_of_match_table,
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
index 8a7d46dae62c..02396c8721dc 100644
--- a/drivers/usb/host/xhci-histb.c
+++ b/drivers/usb/host/xhci-histb.c
@@ -355,7 +355,7 @@ static int __maybe_unused xhci_histb_resume(struct device *dev)
if (!device_may_wakeup(dev))
xhci_histb_host_enable(histb);
- return xhci_resume(xhci, PMSG_RESUME);
+ return xhci_resume(xhci, false, false);
}
static const struct dev_pm_ops xhci_histb_pm_ops = {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fdf0c1008225..d698095fc88d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1953,7 +1953,6 @@ no_bw:
xhci->interrupters = NULL;
xhci->page_size = 0;
- xhci->page_shift = 0;
xhci->usb2_rhub.bus_state.bus_suspended = 0;
xhci->usb3_rhub.bus_state.bus_suspended = 0;
}
@@ -2372,6 +2371,22 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs,
}
EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter);
+static void xhci_hcd_page_size(struct xhci_hcd *xhci)
+{
+ u32 page_size;
+
+ page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
+ if (!is_power_of_2(page_size)) {
+ xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
+ /* Fallback to 4K page size, since that's common */
+ page_size = 1;
+ }
+
+ xhci->page_size = page_size << 12;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
+ xhci->page_size >> 10);
+}
+
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
struct xhci_interrupter *ir;
@@ -2379,7 +2394,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
dma_addr_t dma;
unsigned int val, val2;
u64 val_64;
- u32 page_size, temp;
+ u32 temp;
int i;
INIT_LIST_HEAD(&xhci->cmd_list);
@@ -2388,20 +2403,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
init_completion(&xhci->cmd_ring_stop_completion);
- page_size = readl(&xhci->op_regs->page_size);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Supported page size register = 0x%x", page_size);
- i = ffs(page_size);
- if (i < 16)
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Supported page size of %iK", (1 << (i+12)) / 1024);
- else
- xhci_warn(xhci, "WARN: no supported page size\n");
- /* Use 4K pages, since that's common and the minimum the HC supports */
- xhci->page_shift = 12;
- xhci->page_size = 1 << xhci->page_shift;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "HCD page size set to %iK", xhci->page_size / 1024);
+ xhci_hcd_page_size(xhci);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
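xhci_hcd_page_size() expects the PAGE SIZE register to contain exactly one set bit, where bit N means a page of 2^(N+12) bytes, and falls back to 4K for anything else. The arithmetic in isolation, with the mask value and helper names assumed for the sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_MASK 0xffff	/* assumed width of the PAGE SIZE field */

static bool is_power_of_2(uint32_t v)
{
	return v && !(v & (v - 1));
}

/* Bit N set in the register means a 2^(N+12) byte page; anything else is
 * treated as invalid and the common 4K size is used instead. */
static uint32_t hcd_page_size(uint32_t page_size_reg)
{
	uint32_t ps = page_size_reg & PAGE_SIZE_MASK;

	if (!is_power_of_2(ps))
		ps = 1;

	return ps << 12;	/* bytes */
}

int main(void)
{
	printf("%uK\n", hcd_page_size(0x1) >> 10);	/* 4K  */
	printf("%uK\n", hcd_page_size(0x8) >> 10);	/* 32K */
	printf("%uK\n", hcd_page_size(0x5) >> 10);	/* invalid -> 4K */
	return 0;
}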
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 87f1597a0e5a..257e4d79971f 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -73,13 +73,3 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
-
-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
- /* Without reset on resume, the HC won't work at all */
- xhci->quirks |= XHCI_RESET_ON_RESUME;
-
- return 0;
-}
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 3be021793cc8..9d26e22c4842 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -12,16 +12,10 @@ struct usb_hcd;
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
-int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
#else
static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
{
return 0;
}
-
-static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
-{
- return 0;
-}
#endif
#endif /* __LINUX_XHCI_MVEBU_H */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 54460d11f7ee..0c481cbc8f08 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -807,8 +807,10 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ bool power_lost = msg.event == PM_EVENT_RESTORE;
+ bool is_auto_resume = msg.event == PM_EVENT_AUTO_RESUME;
reset_control_reset(xhci->reset);
@@ -839,7 +841,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, pm_message_t msg)
if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
xhci_pme_quirk(hcd);
- return xhci_resume(xhci, msg);
+ return xhci_resume(xhci, power_lost, is_auto_resume);
}
static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index d85ffa9ffaa7..3155e3a842da 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -106,7 +106,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
};
static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
- .init_quirk = xhci_mvebu_a3700_init_quirk,
+ .quirks = XHCI_RESET_ON_RESUME,
};
static const struct xhci_plat_priv xhci_plat_brcm = {
@@ -479,9 +479,10 @@ static int xhci_plat_suspend(struct device *dev)
return 0;
}
-static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg)
+static int xhci_plat_resume_common(struct device *dev, bool power_lost)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
@@ -501,7 +502,7 @@ static int xhci_plat_resume_common(struct device *dev, struct pm_message pmsg)
if (ret)
goto disable_clks;
- ret = xhci_resume(xhci, pmsg);
+ ret = xhci_resume(xhci, power_lost || priv->power_lost, false);
if (ret)
goto disable_clks;
@@ -522,12 +523,12 @@ disable_clks:
static int xhci_plat_resume(struct device *dev)
{
- return xhci_plat_resume_common(dev, PMSG_RESUME);
+ return xhci_plat_resume_common(dev, false);
}
static int xhci_plat_restore(struct device *dev)
{
- return xhci_plat_resume_common(dev, PMSG_RESTORE);
+ return xhci_plat_resume_common(dev, true);
}
static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
@@ -548,7 +549,7 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- return xhci_resume(xhci, PMSG_AUTO_RESUME);
+ return xhci_resume(xhci, false, true);
}
const struct dev_pm_ops xhci_plat_pm_ops = {
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 6475130eac4b..fe4f95e690fa 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -15,6 +15,7 @@ struct usb_hcd;
struct xhci_plat_priv {
const char *firmware_name;
unsigned long long quirks;
+ bool power_lost;
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
int (*suspend_quirk)(struct usb_hcd *);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 965bffce301e..5d64c297721c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -204,6 +204,50 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
}
/*
+ * If enqueue points at a link TRB, follow links until an ordinary TRB is reached.
+ * Toggle the cycle bit of passed link TRBs and optionally chain them.
+ */
+static void inc_enq_past_link(struct xhci_hcd *xhci, struct xhci_ring *ring, u32 chain)
+{
+ unsigned int link_trb_count = 0;
+
+ while (trb_is_link(ring->enqueue)) {
+
+ /*
+ * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
+ * set, but other sections talk about dealing with the chain bit set. This was
+ * fixed in the 0.96 specification errata, but we have to assume that all 0.95
+ * xHCI hardware can't handle the chain bit being cleared on a link TRB.
+ *
+ * On 0.95 and some 0.96 HCs the chain bit is set once at segment initialization
+ * and never changed here. On all others, modify it as requested by the caller.
+ */
+ if (!xhci_link_chain_quirk(xhci, ring->type)) {
+ ring->enqueue->link.control &= cpu_to_le32(~TRB_CHAIN);
+ ring->enqueue->link.control |= cpu_to_le32(chain);
+ }
+
+ /* Give this link TRB to the hardware */
+ wmb();
+ ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (link_trb_toggles_cycle(ring->enqueue))
+ ring->cycle_state ^= 1;
+
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+
+ trace_xhci_inc_enq(ring);
+
+ if (link_trb_count++ > ring->num_segs) {
+ xhci_warn(xhci, "Link TRB loop at enqueue\n");
+ break;
+ }
+ }
+}
+
+/*
* See Cycle bit rules. SW is the consumer for the event ring only.
*
* If we've just enqueued a TRB that is in the middle of a TD (meaning the
@@ -211,11 +255,6 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
* If we've enqueued the last TRB in a TD, make sure the following link TRBs
* have their chain bit cleared (so that each Link TRB is a separate TD).
*
- * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
- * set, but other sections talk about dealing with the chain bit set. This was
- * fixed in the 0.96 specification errata, but we have to assume that all 0.95
- * xHCI hardware can't handle the chain bit being cleared on a link TRB.
- *
* @more_trbs_coming: Will you enqueue more TRBs before calling
* prepare_transfer()?
*/
@@ -223,8 +262,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
bool more_trbs_coming)
{
u32 chain;
- union xhci_trb *next;
- unsigned int link_trb_count = 0;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
@@ -233,48 +270,67 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
return;
}
- next = ++(ring->enqueue);
+ ring->enqueue++;
- /* Update the dequeue pointer further if that was a link TRB */
- while (trb_is_link(next)) {
+ /*
+ * If we are in the middle of a TD or the caller plans to enqueue more
+ * TDs as one transfer (e.g. control), traverse any link TRBs right now.
+ * Otherwise, enqueue can stay on a link until the next prepare_ring().
+ * This avoids enqueue entering deq_seg and simplifies ring expansion.
+ */
+ if (trb_is_link(ring->enqueue) && (chain || more_trbs_coming))
+ inc_enq_past_link(xhci, ring, chain);
+}
- /*
- * If the caller doesn't plan on enqueueing more TDs before
- * ringing the doorbell, then we don't want to give the link TRB
- * to the hardware just yet. We'll give the link TRB back in
- * prepare_ring() just before we enqueue the TD at the top of
- * the ring.
- */
- if (!chain && !more_trbs_coming)
- break;
+/*
+ * If the suspect DMA address is a TRB in this TD, this function returns that
+ * TRB's segment. Otherwise it returns 0.
+ */
+static struct xhci_segment *trb_in_td(struct xhci_td *td, dma_addr_t suspect_dma)
+{
+ dma_addr_t start_dma;
+ dma_addr_t end_seg_dma;
+ dma_addr_t end_trb_dma;
+ struct xhci_segment *cur_seg;
- /* If we're not dealing with 0.95 hardware or isoc rings on
- * AMD 0.96 host, carry over the chain bit of the previous TRB
- * (which may mean the chain bit is cleared).
- */
- if (!xhci_link_chain_quirk(xhci, ring->type)) {
- next->link.control &= cpu_to_le32(~TRB_CHAIN);
- next->link.control |= cpu_to_le32(chain);
- }
- /* Give this link TRB to the hardware */
- wmb();
- next->link.control ^= cpu_to_le32(TRB_CYCLE);
+ start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
+ cur_seg = td->start_seg;
- /* Toggle the cycle bit after the last ring segment. */
- if (link_trb_toggles_cycle(next))
- ring->cycle_state ^= 1;
+ do {
+ if (start_dma == 0)
+ return NULL;
+ /* We may get an event for a Link TRB in the middle of a TD */
+ end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
+ &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
+ /* If the end TRB isn't in this segment, this is set to 0 */
+ end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
- ring->enq_seg = ring->enq_seg->next;
- ring->enqueue = ring->enq_seg->trbs;
- next = ring->enqueue;
+ if (end_trb_dma > 0) {
+ /* The end TRB is in this segment, so suspect should be here */
+ if (start_dma <= end_trb_dma) {
+ if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
+ return cur_seg;
+ } else {
+ /* Case for one segment with
+ * a TD wrapped around to the top
+ */
+ if ((suspect_dma >= start_dma &&
+ suspect_dma <= end_seg_dma) ||
+ (suspect_dma >= cur_seg->dma &&
+ suspect_dma <= end_trb_dma))
+ return cur_seg;
+ }
+ return NULL;
+ }
+ /* Might still be somewhere in this segment */
+ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+ return cur_seg;
- trace_xhci_inc_enq(ring);
+ cur_seg = cur_seg->next;
+ start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+ } while (cur_seg != td->start_seg);
- if (link_trb_count++ > ring->num_segs) {
- xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
- break;
- }
- }
+ return NULL;
}
/*
@@ -505,8 +561,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
* pointer command pending because the device can choose to start any
* stream once the endpoint is on the HW schedule.
*/
- if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
- (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
+ if (ep_state & (EP_STOP_CMD_PENDING | SET_DEQ_PENDING | EP_HALTED |
+ EP_CLEARING_TT | EP_STALLED))
return;
trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
@@ -1014,7 +1070,7 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
td->urb->stream_id);
hw_deq &= ~0xf;
- if (td->cancel_status == TD_HALTED || trb_in_td(xhci, td, hw_deq, false)) {
+ if (td->cancel_status == TD_HALTED || trb_in_td(td, hw_deq)) {
switch (td->cancel_status) {
case TD_CLEARED: /* TD is already no-op */
case TD_CLEARING_CACHE: /* set TR deq command already queued */
@@ -1104,7 +1160,7 @@ static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
hw_deq &= ~0xf;
td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
- if (trb_in_td(ep->xhci, td, hw_deq, false))
+ if (trb_in_td(td, hw_deq))
return td;
}
return NULL;
@@ -1164,7 +1220,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
*/
switch (GET_EP_CTX_STATE(ep_ctx)) {
case EP_STATE_HALTED:
- xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
+ xhci_dbg(xhci, "Stop ep completion raced with stall\n");
+ /*
+ * If the halt happened before Stop Endpoint failed, its transfer event
+ * should have already been handled and Reset Endpoint should be pending.
+ */
+ if (ep->ep_state & EP_HALTED)
+ goto reset_done;
+
if (ep->ep_state & EP_HAS_STREAMS) {
reset_type = EP_SOFT_RESET;
} else {
@@ -1175,8 +1238,11 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
}
/* reset ep, reset handler cleans up cancelled tds */
err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
+ xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err);
if (err)
break;
+reset_done:
+ /* Reset EP handler will clean up cancelled TDs */
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
case EP_STATE_STOPPED:
@@ -1198,16 +1264,19 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
* Stopped state, but it will soon change to Running.
*
* Assume this bug on unexpected Stop Endpoint failures.
- * Keep retrying until the EP starts and stops again, on
- * chips where this is known to help. Wait for 100ms.
+ * Keep retrying until the EP starts and stops again.
*/
- if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
- break;
fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
GET_EP_CTX_STATE(ep_ctx));
+ /*
+ * Don't retry forever if we guessed wrong or a defective HC never starts
+ * the EP or says 'Running' but fails the command. We must give back TDs.
+ */
+ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
+ break;
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
if (!command) {
@@ -1332,43 +1401,6 @@ void xhci_hc_died(struct xhci_hcd *xhci)
usb_hc_died(xhci_to_hcd(xhci));
}
-static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
- struct xhci_virt_device *dev,
- struct xhci_ring *ep_ring,
- unsigned int ep_index)
-{
- union xhci_trb *dequeue_temp;
-
- dequeue_temp = ep_ring->dequeue;
-
- /* If we get two back-to-back stalls, and the first stalled transfer
- * ends just before a link TRB, the dequeue pointer will be left on
- * the link TRB by the code in the while loop. So we have to update
- * the dequeue pointer one segment further, or we'll jump off
- * the segment into la-la-land.
- */
- if (trb_is_link(ep_ring->dequeue)) {
- ep_ring->deq_seg = ep_ring->deq_seg->next;
- ep_ring->dequeue = ep_ring->deq_seg->trbs;
- }
-
- while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
- /* We have more usable TRBs */
- ep_ring->dequeue++;
- if (trb_is_link(ep_ring->dequeue)) {
- if (ep_ring->dequeue ==
- dev->eps[ep_index].queued_deq_ptr)
- break;
- ep_ring->deq_seg = ep_ring->deq_seg->next;
- ep_ring->dequeue = ep_ring->deq_seg->trbs;
- }
- if (ep_ring->dequeue == dequeue_temp) {
- xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
- break;
- }
- }
-}
-
/*
* When we get a completion for a Set Transfer Ring Dequeue Pointer command,
* we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -1473,8 +1505,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
- update_ring_for_set_deq_completion(xhci, ep->vdev,
- ep_ring, ep_index);
+ ep_ring->deq_seg = ep->queued_deq_seg;
+ ep_ring->dequeue = ep->queued_deq_ptr;
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
@@ -2116,67 +2148,6 @@ cleanup:
spin_lock(&xhci->lock);
}
-/*
- * If the suspect DMA address is a TRB in this TD, this function returns that
- * TRB's segment. Otherwise it returns 0.
- */
-struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td, dma_addr_t suspect_dma,
- bool debug)
-{
- dma_addr_t start_dma;
- dma_addr_t end_seg_dma;
- dma_addr_t end_trb_dma;
- struct xhci_segment *cur_seg;
-
- start_dma = xhci_trb_virt_to_dma(td->start_seg, td->start_trb);
- cur_seg = td->start_seg;
-
- do {
- if (start_dma == 0)
- return NULL;
- /* We may get an event for a Link TRB in the middle of a TD */
- end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
- &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
- /* If the end TRB isn't in this segment, this is set to 0 */
- end_trb_dma = xhci_trb_virt_to_dma(cur_seg, td->end_trb);
-
- if (debug)
- xhci_warn(xhci,
- "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
- (unsigned long long)suspect_dma,
- (unsigned long long)start_dma,
- (unsigned long long)end_trb_dma,
- (unsigned long long)cur_seg->dma,
- (unsigned long long)end_seg_dma);
-
- if (end_trb_dma > 0) {
- /* The end TRB is in this segment, so suspect should be here */
- if (start_dma <= end_trb_dma) {
- if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
- return cur_seg;
- } else {
- /* Case for one segment with
- * a TD wrapped around to the top
- */
- if ((suspect_dma >= start_dma &&
- suspect_dma <= end_seg_dma) ||
- (suspect_dma >= cur_seg->dma &&
- suspect_dma <= end_trb_dma))
- return cur_seg;
- }
- return NULL;
- } else {
- /* Might still be somewhere in this segment */
- if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
- return cur_seg;
- }
- cur_seg = cur_seg->next;
- start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
- } while (cur_seg != td->start_seg);
-
- return NULL;
-}
-
static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_virt_ep *ep)
{
@@ -2476,6 +2447,12 @@ static void process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
if (ep_trb != td->end_trb)
td->error_mid_td = true;
break;
+ case COMP_MISSED_SERVICE_ERROR:
+ frame->status = -EXDEV;
+ sum_trbs_for_length = true;
+ if (ep_trb != td->end_trb)
+ td->error_mid_td = true;
+ break;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
case COMP_STALL_ERROR:
frame->status = -EPROTO;
@@ -2596,6 +2573,9 @@ static void process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
return;
+ case COMP_STALL_ERROR:
+ ep->ep_state |= EP_STALLED;
+ break;
default:
/* do nothing */
break;
@@ -2644,6 +2624,22 @@ static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_
return 0;
}
+static bool xhci_spurious_success_tx_event(struct xhci_hcd *xhci,
+ struct xhci_ring *ring)
+{
+ switch (ring->old_trb_comp_code) {
+ case COMP_SHORT_PACKET:
+ return xhci->quirks & XHCI_SPURIOUS_SUCCESS;
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_BABBLE_DETECTED_ERROR:
+ case COMP_ISOCH_BUFFER_OVERRUN:
+ return xhci->quirks & XHCI_ETRON_HOST &&
+ ring->type == TYPE_ISOC;
+ default:
+ return false;
+ }
+}
+
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -2664,6 +2660,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
+ bool ring_xrun_event = false;
slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -2697,8 +2694,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_SUCCESS:
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
trb_comp_code = COMP_SHORT_PACKET;
- xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
- slot_id, ep_index, ep_ring->last_td_was_short);
+ xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td comp code %d\n",
+ slot_id, ep_index, ep_ring->old_trb_comp_code);
}
break;
case COMP_SHORT_PACKET:
@@ -2770,14 +2767,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* Underrun Event for OUT Isoch endpoint.
*/
xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
- if (ep->skip)
- break;
- return 0;
+ ring_xrun_event = true;
+ break;
case COMP_RING_OVERRUN:
xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
- if (ep->skip)
- break;
- return 0;
+ ring_xrun_event = true;
+ break;
case COMP_MISSED_SERVICE_ERROR:
/*
 * When encountering a missed service error, one or more isoc tds
@@ -2787,9 +2782,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
ep->skip = true;
xhci_dbg(xhci,
- "Miss service interval error for slot %u ep %u, set skip flag\n",
- slot_id, ep_index);
- return 0;
+ "Miss service interval error for slot %u ep %u, set skip flag%s\n",
+ slot_id, ep_index, ep_trb_dma ? ", skip now" : "");
+ break;
case COMP_NO_PING_RESPONSE_ERROR:
ep->skip = true;
xhci_dbg(xhci,
@@ -2832,11 +2827,15 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
td = list_first_entry_or_null(&ep_ring->td_list, struct xhci_td, td_list);
- if (td && td->error_mid_td && !trb_in_td(xhci, td, ep_trb_dma, false)) {
+ if (td && td->error_mid_td && !trb_in_td(td, ep_trb_dma)) {
xhci_dbg(xhci, "Missing TD completion event after mid TD error\n");
xhci_dequeue_td(xhci, td, ep_ring, td->status);
}
+ /* If the TRB pointer is NULL, missed TDs will be skipped on the next event */
+ if (trb_comp_code == COMP_MISSED_SERVICE_ERROR && !ep_trb_dma)
+ return 0;
+
if (list_empty(&ep_ring->td_list)) {
/*
 * Don't print warnings if ring is empty due to a stopped endpoint generating an
@@ -2846,7 +2845,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
if (trb_comp_code != COMP_STOPPED &&
trb_comp_code != COMP_STOPPED_LENGTH_INVALID &&
- !ep_ring->last_td_was_short) {
+ !ring_xrun_event &&
+ !xhci_spurious_success_tx_event(xhci, ep_ring)) {
xhci_warn(xhci, "Event TRB for slot %u ep %u with no TDs queued\n",
slot_id, ep_index);
}
@@ -2860,14 +2860,31 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_list);
/* Is this a TRB in the currently executing TD? */
- ep_seg = trb_in_td(xhci, td, ep_trb_dma, false);
+ ep_seg = trb_in_td(td, ep_trb_dma);
if (!ep_seg) {
if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ /* this event is unlikely to match any TD, don't skip them all */
+ if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID)
+ return 0;
+
skip_isoc_td(xhci, td, ep, status);
- if (!list_empty(&ep_ring->td_list))
+
+ if (!list_empty(&ep_ring->td_list)) {
+ if (ring_xrun_event) {
+ /*
+					 * If we are here, we are on an xHCI 1.0 host with no
+ * idea how many TDs were missed or where the xrun
+ * occurred. New TDs may have been added after the
+ * xrun, so skip only one TD to be safe.
+ */
+ xhci_dbg(xhci, "Skipped one TD for slot %u ep %u",
+ slot_id, ep_index);
+ return 0;
+ }
continue;
+ }
xhci_dbg(xhci, "All TDs skipped for slot %u ep %u. Clear skip flag.\n",
slot_id, ep_index);
@@ -2876,6 +2893,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
goto check_endpoint_halted;
}
+ /* TD was queued after xrun, maybe xrun was on a link, don't panic yet */
+ if (ring_xrun_event)
+ return 0;
+
/*
* Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the current
 * TD pointed by 'ep_ring->dequeue' because the hardware dequeue
@@ -2890,21 +2911,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/*
* Some hosts give a spurious success event after a short
- * transfer. Ignore it.
+ * transfer or error on last TRB. Ignore it.
*/
- if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
- ep_ring->last_td_was_short) {
- ep_ring->last_td_was_short = false;
+ if (xhci_spurious_success_tx_event(xhci, ep_ring)) {
+ xhci_dbg(xhci, "Spurious event dma %pad, comp_code %u after %u\n",
+ &ep_trb_dma, trb_comp_code, ep_ring->old_trb_comp_code);
+ ep_ring->old_trb_comp_code = trb_comp_code;
return 0;
}
/* HC is busted, give up! */
- xhci_err(xhci,
- "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n",
- ep_index, trb_comp_code);
- trb_in_td(xhci, td, ep_trb_dma, true);
-
- return -ESHUTDOWN;
+ goto debug_finding_td;
}
if (ep->skip) {
@@ -2922,10 +2939,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
*/
} while (ep->skip);
- if (trb_comp_code == COMP_SHORT_PACKET)
- ep_ring->last_td_was_short = true;
- else
- ep_ring->last_td_was_short = false;
+ ep_ring->old_trb_comp_code = trb_comp_code;
+
+ /* Get out if a TD was queued at enqueue after the xrun occurred */
+ if (ring_xrun_event)
+ return 0;
ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb, ep_trb_dma);
@@ -2957,6 +2975,17 @@ check_endpoint_halted:
return 0;
+debug_finding_td:
+ xhci_err(xhci, "Event dma %pad for ep %d status %d not part of TD at %016llx - %016llx\n",
+ &ep_trb_dma, ep_index, trb_comp_code,
+ (unsigned long long)xhci_trb_virt_to_dma(td->start_seg, td->start_trb),
+ (unsigned long long)xhci_trb_virt_to_dma(td->end_seg, td->end_trb));
+
+ xhci_for_each_ring_seg(ep_ring->first_seg, ep_seg)
+ xhci_warn(xhci, "Ring seg %u dma %pad\n", ep_seg->num, &ep_seg->dma);
+
+ return -ESHUTDOWN;
+
err_out:
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long) xhci_trb_virt_to_dma(
@@ -3216,7 +3245,6 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
- unsigned int link_trb_count = 0;
unsigned int new_segs = 0;
/* Make sure the endpoint has been added to xHC schedule */
@@ -3264,33 +3292,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
}
- while (trb_is_link(ep_ring->enqueue)) {
- /* If we're not dealing with 0.95 hardware or isoc rings
- * on AMD 0.96 host, clear the chain bit.
- */
- if (!xhci_link_chain_quirk(xhci, ep_ring->type))
- ep_ring->enqueue->link.control &=
- cpu_to_le32(~TRB_CHAIN);
- else
- ep_ring->enqueue->link.control |=
- cpu_to_le32(TRB_CHAIN);
-
- wmb();
- ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
-
- /* Toggle the cycle bit after the last ring segment. */
- if (link_trb_toggles_cycle(ep_ring->enqueue))
- ep_ring->cycle_state ^= 1;
-
- ep_ring->enq_seg = ep_ring->enq_seg->next;
- ep_ring->enqueue = ep_ring->enq_seg->trbs;
-
- /* prevent infinite loop if all first trbs are link trbs */
- if (link_trb_count++ > ep_ring->num_segs) {
- xhci_warn(xhci, "Ring is an endless link TRB loop\n");
- return -EINVAL;
- }
- }
+ /* Ensure that new TRBs won't overwrite a link */
+ if (trb_is_link(ep_ring->enqueue))
+ inc_enq_past_link(xhci, ep_ring, 0);
if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 22dc86fb5254..b5c362c2051d 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -2162,11 +2162,11 @@ static void tegra_xhci_program_utmi_power_lp0_exit(struct tegra_xusb *tegra)
}
}
-static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
+static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool is_auto_resume)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
struct device *dev = tegra->dev;
- bool wakeup = runtime ? true : device_may_wakeup(dev);
+ bool wakeup = is_auto_resume ? true : device_may_wakeup(dev);
unsigned int i;
int err;
u32 usbcmd;
@@ -2232,11 +2232,11 @@ out:
return err;
}
-static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime)
+static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool is_auto_resume)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
struct device *dev = tegra->dev;
- bool wakeup = runtime ? true : device_may_wakeup(dev);
+ bool wakeup = is_auto_resume ? true : device_may_wakeup(dev);
unsigned int i;
u32 usbcmd;
int err;
@@ -2287,7 +2287,7 @@ static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime)
if (wakeup)
tegra_xhci_disable_phy_sleepwalk(tegra);
- err = xhci_resume(xhci, runtime ? PMSG_AUTO_RESUME : PMSG_RESUME);
+ err = xhci_resume(xhci, false, is_auto_resume);
if (err < 0) {
dev_err(tegra->dev, "failed to resume XHCI: %d\n", err);
goto disable_phy;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1a90ebc8a30e..83a4adf57bae 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -994,16 +994,14 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
* This is called when the machine transition from S3/S4 mode.
*
*/
-int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
+int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume)
{
- bool hibernated = (msg.event == PM_EVENT_RESTORE);
u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
bool suspended_usb3_devs = false;
- bool reinit_xhc = false;
if (!hcd->state)
return 0;
@@ -1022,10 +1020,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
spin_lock_irq(&xhci->lock);
- if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
- reinit_xhc = true;
+ if (xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
+ power_lost = true;
- if (!reinit_xhc) {
+ if (!power_lost) {
/*
* Some controllers might lose power during suspend, so wait
* for controller not ready bit to clear, just as in xHC init.
@@ -1065,12 +1063,12 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
/* re-initialize the HC on Restore Error, or Host Controller Error */
if ((temp & (STS_SRE | STS_HCE)) &&
!(xhci->xhc_state & XHCI_STATE_REMOVING)) {
- reinit_xhc = true;
- if (!xhci->broken_suspend)
+ if (!power_lost)
xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+ power_lost = true;
}
- if (reinit_xhc) {
+ if (power_lost) {
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
!(xhci_all_ports_seen_u0(xhci))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
@@ -1168,8 +1166,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
pending_portevent = xhci_pending_portevent(xhci);
- if (suspended_usb3_devs && !pending_portevent &&
- msg.event == PM_EVENT_AUTO_RESUME) {
+ if (suspended_usb3_devs && !pending_portevent && is_auto_resume) {
msleep(120);
pending_portevent = xhci_pending_portevent(xhci);
}
@@ -1608,6 +1605,11 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
goto free_priv;
}
+	/* Class driver might not be aware the ep halted due to async URB giveback */
+ if (*ep_state & EP_STALLED)
+ dev_dbg(&urb->dev->dev, "URB %p queued before clearing halt\n",
+ urb);
+
switch (usb_endpoint_type(&urb->ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
@@ -1768,8 +1770,8 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
- /* In this case no commands are pending but the endpoint is stopped */
- if (ep->ep_state & EP_CLEARING_TT) {
+ /* In these cases no commands are pending but the endpoint is stopped */
+ if (ep->ep_state & (EP_CLEARING_TT | EP_STALLED)) {
/* and cancelled TDs can be given back right away */
xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
urb->dev->slot_id, ep_index, ep->ep_state);
@@ -3207,8 +3209,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
ep = &vdev->eps[ep_index];
- /* Bail out if toggle is already being cleared by a endpoint reset */
spin_lock_irqsave(&xhci->lock, flags);
+
+ ep->ep_state &= ~EP_STALLED;
+
+	/* Bail out if toggle is already being cleared by an endpoint reset */
if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -4759,8 +4764,8 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
*/
if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
return timeout_ns;
- dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
- "due to long timeout %llu ms\n", timeout_ns);
+ dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %lluus\n",
+ timeout_ns);
return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
}
@@ -4817,8 +4822,8 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
*/
if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
return timeout_ns;
- dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
- "due to long timeout %llu ms\n", timeout_ns);
+ dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %lluus\n",
+ timeout_ns * 256);
return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 779b01dee068..37860f1e3aba 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -211,6 +211,9 @@ struct xhci_op_regs {
#define CONFIG_CIE (1 << 9)
/* bits 10:31 - reserved and should be preserved */
+/* bits 15:0 - HCD page shift bit */
+#define XHCI_PAGE_SIZE_MASK 0xffff
+
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
@@ -661,7 +664,7 @@ struct xhci_virt_ep {
unsigned int err_count;
unsigned int ep_state;
#define SET_DEQ_PENDING (1 << 0)
-#define EP_HALTED (1 << 1) /* For stall handling */
+#define EP_HALTED (1 << 1) /* Halted host ep handling */
#define EP_STOP_CMD_PENDING (1 << 2) /* For URB cancellation */
/* Transitioning the endpoint to using streams, don't enqueue URBs */
#define EP_GETTING_STREAMS (1 << 3)
@@ -672,6 +675,7 @@ struct xhci_virt_ep {
#define EP_SOFT_CLEAR_TOGGLE (1 << 7)
/* usb_hub_clear_tt_buffer is in progress */
#define EP_CLEARING_TT (1 << 8)
+#define EP_STALLED (1 << 9) /* For stall handling */
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
struct xhci_hcd *xhci;
@@ -1371,7 +1375,7 @@ struct xhci_ring {
unsigned int num_trbs_free; /* used only by xhci DbC */
unsigned int bounce_buf_len;
enum xhci_ring_type type;
- bool last_td_was_short;
+ u32 old_trb_comp_code;
struct radix_tree_root *trb_address_map;
};
@@ -1514,10 +1518,7 @@ struct xhci_hcd {
u16 max_interrupters;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
- /* 4KB min, 128MB max */
- int page_size;
- /* Valid values are 12 to 20, inclusive */
- int page_shift;
+ u32 page_size;
/* MSI-X/MSI vectors */
int nvecs;
/* optional clocks */
@@ -1759,11 +1760,20 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
}
-/* Link TRB chain should always be set on 0.95 hosts, and AMD 0.96 ISOC rings */
+/*
+ * Reportedly, some chapters of the v0.95 spec said that a Link TRB always has its chain bit set.
+ * Other chapters and later specs say that it should only be set if the link is inside a TD
+ * which continues from the end of one segment to the next segment.
+ *
+ * Some 0.95 hardware was found to misbehave if any link TRB doesn't have the chain bit set.
+ *
+ * 0.96 hardware from AMD and NEC was found to ignore unchained isochronous link TRBs when
+ * "resynchronizing the pipe" after a Missed Service Error.
+ */
static inline bool xhci_link_chain_quirk(struct xhci_hcd *xhci, enum xhci_ring_type type)
{
return (xhci->quirks & XHCI_LINK_TRB_QUIRK) ||
- (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST));
+ (type == TYPE_ISOC && (xhci->quirks & (XHCI_AMD_0x96_HOST | XHCI_NEC_HOST)));
}
/* xHCI debugging */
@@ -1870,7 +1880,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
-int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg);
+int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume);
irqreturn_t xhci_irq(struct usb_hcd *hcd);
irqreturn_t xhci_msi_irq(int irq, void *hcd);
@@ -1884,8 +1894,6 @@ int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
-struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td,
- dma_addr_t suspect_dma, bool debug);
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
diff --git a/drivers/usb/misc/onboard_usb_dev.h b/drivers/usb/misc/onboard_usb_dev.h
index 317b3eb99c02..933797a7e084 100644
--- a/drivers/usb/misc/onboard_usb_dev.h
+++ b/drivers/usb/misc/onboard_usb_dev.h
@@ -23,6 +23,13 @@ static const struct onboard_dev_pdata microchip_usb424_data = {
.is_hub = true,
};
+static const struct onboard_dev_pdata microchip_usb2514_data = {
+ .reset_us = 1,
+ .num_supplies = 2,
+ .supply_names = { "vdd", "vdda" },
+ .is_hub = true,
+};
+
static const struct onboard_dev_pdata microchip_usb5744_data = {
.reset_us = 0,
.power_on_delay_us = 10000,
@@ -96,7 +103,7 @@ static const struct onboard_dev_pdata xmos_xvf3500_data = {
static const struct of_device_id onboard_dev_match[] = {
{ .compatible = "usb424,2412", .data = &microchip_usb424_data, },
- { .compatible = "usb424,2514", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2514", .data = &microchip_usb2514_data, },
{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2744", .data = &microchip_usb5744_data, },
{ .compatible = "usb424,5744", .data = &microchip_usb5744_data, },
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index e24cdb667307..4fb453ca5450 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -636,10 +636,8 @@ static int usb251xb_probe(struct usb251xb *hub)
if (np && usb_data) {
err = usb251xb_get_ofdata(hub, usb_data);
- if (err) {
- dev_err(dev, "failed to get ofdata: %d\n", err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "failed to get ofdata\n");
}
/*
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index acdeb1117cd3..df56c972986f 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -59,7 +59,7 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
return IRQ_NONE;
}
-static struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
+static const struct musb_fifo_cfg jz4740_musb_fifo_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 64, },
@@ -205,7 +205,7 @@ static const struct musb_hdrc_platform_data jz4740_musb_pdata = {
.platform_ops = &jz4740_musb_ops,
};
-static struct musb_fifo_cfg jz4770_musb_fifo_cfg[] = {
+static const struct musb_fifo_cfg jz4770_musb_fifo_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index aa988d74b58d..c6cbe718b1da 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -365,7 +365,7 @@ static const struct musb_platform_ops mtk_musb_ops = {
#define MTK_MUSB_MAX_EP_NUM 8
#define MTK_MUSB_RAM_BITS 11
-static struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
+static const struct musb_fifo_cfg mtk_musb_mode_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c
index 7edc8429b274..71e4271cba75 100644
--- a/drivers/usb/musb/mpfs.c
+++ b/drivers/usb/musb/mpfs.c
@@ -29,7 +29,7 @@ struct mpfs_glue {
struct clk *clk;
};
-static struct musb_fifo_cfg mpfs_musb_mode_cfg[] = {
+static const struct musb_fifo_cfg mpfs_musb_mode_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 7f349f5e781d..96fa700eaed1 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1271,7 +1271,7 @@ MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
*/
/* mode 0 - fits in 2KB */
-static struct musb_fifo_cfg mode_0_cfg[] = {
+static const struct musb_fifo_cfg mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
@@ -1280,7 +1280,7 @@ static struct musb_fifo_cfg mode_0_cfg[] = {
};
/* mode 1 - fits in 4KB */
-static struct musb_fifo_cfg mode_1_cfg[] = {
+static const struct musb_fifo_cfg mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
@@ -1289,7 +1289,7 @@ static struct musb_fifo_cfg mode_1_cfg[] = {
};
/* mode 2 - fits in 4KB */
-static struct musb_fifo_cfg mode_2_cfg[] = {
+static const struct musb_fifo_cfg mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1299,7 +1299,7 @@ static struct musb_fifo_cfg mode_2_cfg[] = {
};
/* mode 3 - fits in 4KB */
-static struct musb_fifo_cfg mode_3_cfg[] = {
+static const struct musb_fifo_cfg mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1309,7 +1309,7 @@ static struct musb_fifo_cfg mode_3_cfg[] = {
};
/* mode 4 - fits in 16KB */
-static struct musb_fifo_cfg mode_4_cfg[] = {
+static const struct musb_fifo_cfg mode_4_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1340,7 +1340,7 @@ static struct musb_fifo_cfg mode_4_cfg[] = {
};
/* mode 5 - fits in 8KB */
-static struct musb_fifo_cfg mode_5_cfg[] = {
+static const struct musb_fifo_cfg mode_5_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
@@ -1447,7 +1447,7 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
-static struct musb_fifo_cfg ep0_cfg = {
+static const struct musb_fifo_cfg ep0_cfg = {
.style = FIFO_RXTX, .maxpacket = 64,
};
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index eac1cde86be3..a6bd3e968cc7 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -629,7 +629,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
#define SUNXI_MUSB_RAM_BITS 11
/* Allwinner OTG supports up to 5 endpoints */
-static struct musb_fifo_cfg sunxi_musb_mode_cfg_5eps[] = {
+static const struct musb_fifo_cfg sunxi_musb_mode_cfg_5eps[] = {
MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
@@ -643,7 +643,7 @@ static struct musb_fifo_cfg sunxi_musb_mode_cfg_5eps[] = {
};
/* H3/V3s OTG supports only 4 endpoints */
-static struct musb_fifo_cfg sunxi_musb_mode_cfg_4eps[] = {
+static const struct musb_fifo_cfg sunxi_musb_mode_cfg_4eps[] = {
MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 7490f1798b46..7069dd3f4d0d 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -769,11 +769,9 @@ static int mxs_phy_probe(struct platform_device *pdev)
return PTR_ERR(base);
clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev,
- "can't get the clock, err=%ld", PTR_ERR(clk));
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "can't get the clock\n");
mxs_phy = devm_kzalloc(&pdev->dev, sizeof(*mxs_phy), GFP_KERNEL);
if (!mxs_phy)
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index e683a37e3a7a..4df63e67bb37 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -256,29 +256,6 @@ static void otg_ulpi_init(struct usb_phy *phy, struct usb_otg *otg,
}
struct usb_phy *
-otg_ulpi_create(struct usb_phy_io_ops *ops,
- unsigned int flags)
-{
- struct usb_phy *phy;
- struct usb_otg *otg;
-
- phy = kzalloc(sizeof(*phy), GFP_KERNEL);
- if (!phy)
- return NULL;
-
- otg = kzalloc(sizeof(*otg), GFP_KERNEL);
- if (!otg) {
- kfree(phy);
- return NULL;
- }
-
- otg_ulpi_init(phy, otg, ops, flags);
-
- return phy;
-}
-EXPORT_SYMBOL_GPL(otg_ulpi_create);
-
-struct usb_phy *
devm_otg_ulpi_create(struct device *dev,
struct usb_phy_io_ops *ops,
unsigned int flags)
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index ca3da79afd23..93710b762893 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -66,29 +66,16 @@
#define MOS_WDR_TIMEOUT 5000 /* default urb timeout */
-#define MOS_PORT1 0x0200
-#define MOS_PORT2 0x0300
-#define MOS_VENREG 0x0000
-#define MOS_MAX_PORT 0x02
-#define MOS_WRITE 0x0E
-#define MOS_READ 0x0D
-
/* Requests */
#define MCS_RD_RTYPE 0xC0
#define MCS_WR_RTYPE 0x40
#define MCS_RDREQ 0x0D
#define MCS_WRREQ 0x0E
-#define MCS_CTRL_TIMEOUT 500
#define VENDOR_READ_LENGTH (0x01)
-#define MAX_NAME_LEN 64
-
#define ZLP_REG1 0x3A /* Zero_Flag_Reg1 58 */
#define ZLP_REG5 0x3E /* Zero_Flag_Reg5 62 */
-/* For higher baud Rates use TIOCEXBAUD */
-#define TIOCEXBAUD 0x5462
-
/*
* Vendor id and device id defines
*
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 6263c4e61678..e01f3a42bde4 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -174,7 +174,7 @@ struct alauda_card_info {
unsigned char zoneshift; /* 1<<zs blocks per zone */
};
-static struct alauda_card_info alauda_card_ids[] = {
+static const struct alauda_card_info alauda_card_ids[] = {
/* NAND flash */
{ 0x6e, 20, 8, 4, 8}, /* 1 MB */
{ 0xe8, 20, 8, 4, 8}, /* 1 MB */
@@ -200,7 +200,7 @@ static struct alauda_card_info alauda_card_ids[] = {
{ 0,}
};
-static struct alauda_card_info *alauda_card_find_id(unsigned char id)
+static const struct alauda_card_info *alauda_card_find_id(unsigned char id)
{
int i;
@@ -383,7 +383,7 @@ static int alauda_init_media(struct us_data *us)
{
unsigned char *data = us->iobuf;
int ready = 0;
- struct alauda_card_info *media_info;
+ const struct alauda_card_info *media_info;
unsigned int num_zones;
while (ready == 0) {
@@ -1132,7 +1132,7 @@ static int alauda_transport(struct scsi_cmnd *srb, struct us_data *us)
int rc;
struct alauda_info *info = (struct alauda_info *) us->extra;
unsigned char *ptr = us->iobuf;
- static unsigned char inquiry_response[36] = {
+ static const unsigned char inquiry_response[36] = {
0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
};
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index bbfa2398b170..9ba369483c9b 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -319,7 +319,7 @@ static int datafab_determine_lun(struct us_data *us,
//
// There might be a better way of doing this?
- static unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
+ static const unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
unsigned char *command = us->iobuf;
unsigned char *buf;
int count = 0, rc;
@@ -384,7 +384,7 @@ static int datafab_id_device(struct us_data *us,
// to the ATA spec, 'Sector Count' isn't used but the Windows driver
// sets this bit so we do too...
//
- static unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
+ static const unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
unsigned char *command = us->iobuf;
unsigned char *reply;
int rc;
@@ -437,16 +437,16 @@ static int datafab_handle_mode_sense(struct us_data *us,
struct scsi_cmnd * srb,
int sense_6)
{
- static unsigned char rw_err_page[12] = {
+ static const unsigned char rw_err_page[12] = {
0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0
};
- static unsigned char cache_page[12] = {
+ static const unsigned char cache_page[12] = {
0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
- static unsigned char rbac_page[12] = {
+ static const unsigned char rbac_page[12] = {
0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0
};
- static unsigned char timer_page[8] = {
+ static const unsigned char timer_page[8] = {
0x1C, 0x6, 0, 0, 0, 0
};
unsigned char pc, page_code;
@@ -550,7 +550,7 @@ static int datafab_transport(struct scsi_cmnd *srb, struct us_data *us)
int rc;
unsigned long block, blocks;
unsigned char *ptr = us->iobuf;
- static unsigned char inquiry_reply[8] = {
+ static const unsigned char inquiry_reply[8] = {
0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
};
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index f8f9ce8dc710..b243bd5521a6 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -54,7 +54,7 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap*) us->iobuf;
int res;
unsigned int partial;
- static char init_string[] = "\xec\x0a\x06\x00$PCCHIPS";
+ static const char init_string[] = "\xec\x0a\x06\x00$PCCHIPS";
usb_stor_dbg(us, "Sending UCR-61S2B initialization packet...\n");
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 39ca84d68591..089c6f8ac85f 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -367,16 +367,16 @@ static int jumpshot_handle_mode_sense(struct us_data *us,
struct scsi_cmnd * srb,
int sense_6)
{
- static unsigned char rw_err_page[12] = {
+ static const unsigned char rw_err_page[12] = {
0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0
};
- static unsigned char cache_page[12] = {
+ static const unsigned char cache_page[12] = {
0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
- static unsigned char rbac_page[12] = {
+ static const unsigned char rbac_page[12] = {
0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0
};
- static unsigned char timer_page[8] = {
+ static const unsigned char timer_page[8] = {
0x1C, 0x6, 0, 0, 0, 0
};
unsigned char pc, page_code;
@@ -477,7 +477,7 @@ static int jumpshot_transport(struct scsi_cmnd *srb, struct us_data *us)
int rc;
unsigned long block, blocks;
unsigned char *ptr = us->iobuf;
- static unsigned char inquiry_response[8] = {
+ static const unsigned char inquiry_response[8] = {
0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
};
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 2a82ed7b68ea..4e516b445136 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -191,7 +191,7 @@ MODULE_DEVICE_TABLE(usb, realtek_cr_ids);
.initFunction = init_function, \
}
-static struct us_unusual_dev realtek_cr_unusual_dev_list[] = {
+static const struct us_unusual_dev realtek_cr_unusual_dev_list[] = {
# include "unusual_realtek.h"
{} /* Terminating entry */
};
@@ -797,10 +797,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
static int card_first_show = 1;
- static u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0,
+ static const u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0
};
- static u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0,
+ static const u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0
};
int ret;
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index d21ce3466e25..e66b920e99e2 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -144,7 +144,7 @@ static inline char *nand_flash_manufacturer(int manuf_id) {
* 256 MB NAND flash has a 5-byte ID with 2nd byte 0xaa, 0xba, 0xca or 0xda.
*/
-static struct nand_flash_dev nand_flash_ids[] = {
+static const struct nand_flash_dev nand_flash_ids[] = {
/* NAND flash */
{ 0x6e, 20, 8, 4, 8, 2}, /* 1 MB */
{ 0xe8, 20, 8, 4, 8, 2}, /* 1 MB */
@@ -169,7 +169,7 @@ static struct nand_flash_dev nand_flash_ids[] = {
{ 0,}
};
-static struct nand_flash_dev *
+static const struct nand_flash_dev *
nand_find_id(unsigned char id) {
int i;
@@ -1133,9 +1133,9 @@ sddr09_reset(struct us_data *us) {
}
#endif
-static struct nand_flash_dev *
+static const struct nand_flash_dev *
sddr09_get_cardinfo(struct us_data *us, unsigned char flags) {
- struct nand_flash_dev *cardinfo;
+ const struct nand_flash_dev *cardinfo;
unsigned char deviceID[4];
char blurbtxt[256];
int result;
@@ -1545,12 +1545,12 @@ static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
struct sddr09_card_info *info;
- static unsigned char inquiry_response[8] = {
+ static const unsigned char inquiry_response[8] = {
0x00, 0x80, 0x00, 0x02, 0x1F, 0x00, 0x00, 0x00
};
/* note: no block descriptor support */
- static unsigned char mode_page_01[19] = {
+ static const unsigned char mode_page_01[19] = {
0x00, 0x0F, 0x00, 0x0, 0x0, 0x0, 0x00,
0x01, 0x0A,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
@@ -1584,7 +1584,7 @@ static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
}
if (srb->cmnd[0] == READ_CAPACITY) {
- struct nand_flash_dev *cardinfo;
+ const struct nand_flash_dev *cardinfo;
sddr09_get_wp(us, info); /* read WP bit */
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index d5cdff30f6f3..b323f0a36260 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -775,11 +775,11 @@ static void sddr55_card_info_destructor(void *extra) {
static int sddr55_transport(struct scsi_cmnd *srb, struct us_data *us)
{
int result;
- static unsigned char inquiry_response[8] = {
+ static const unsigned char inquiry_response[8] = {
0x00, 0x80, 0x00, 0x02, 0x1F, 0x00, 0x00, 0x00
};
// write-protected for now, no block descriptor support
- static unsigned char mode_page_01[20] = {
+ static const unsigned char mode_page_01[20] = {
0x0, 0x12, 0x00, 0x80, 0x0, 0x0, 0x0, 0x0,
0x01, 0x0A,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index c33cbf177e6f..27faa0ead11d 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1683,7 +1683,7 @@ static int usbat_flash_transport(struct scsi_cmnd * srb, struct us_data *us)
struct usbat_info *info = (struct usbat_info *) (us->extra);
unsigned long block, blocks;
unsigned char *ptr = us->iobuf;
- static unsigned char inquiry_response[36] = {
+ static const unsigned char inquiry_response[36] = {
0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
};
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index e6bc8ecaecbb..1aa1bd26c81f 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -528,7 +528,7 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
u32 sector;
 /* To report "Medium Error: Record Not Found" */
- static unsigned char record_not_found[18] = {
+ static const unsigned char record_not_found[18] = {
[0] = 0x70, /* current error */
[2] = MEDIUM_ERROR, /* = 0x03 */
[7] = 0x0a, /* additional length */
diff --git a/drivers/usb/typec/altmodes/thunderbolt.c b/drivers/usb/typec/altmodes/thunderbolt.c
index 1b475b1d98e7..6eadf7835f8f 100644
--- a/drivers/usb/typec/altmodes/thunderbolt.c
+++ b/drivers/usb/typec/altmodes/thunderbolt.c
@@ -112,7 +112,7 @@ static void tbt_altmode_work(struct work_struct *work)
return;
disable_plugs:
- for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ for (int i = TYPEC_PLUG_SOP_PP; i >= 0; --i) {
if (tbt->plug[i])
typec_altmode_put_plug(tbt->plug[i]);
@@ -143,7 +143,7 @@ static int tbt_enter_modes_ordered(struct typec_altmode *alt)
if (tbt->plug[TYPEC_PLUG_SOP_P]) {
ret = typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL);
if (ret < 0) {
- for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ for (int i = TYPEC_PLUG_SOP_PP; i >= 0; --i) {
if (tbt->plug[i])
typec_altmode_put_plug(tbt->plug[i]);
@@ -324,7 +324,7 @@ static void tbt_altmode_remove(struct typec_altmode *alt)
{
struct tbt_altmode *tbt = typec_altmode_get_drvdata(alt);
- for (int i = TYPEC_PLUG_SOP_PP; i > 0; --i) {
+ for (int i = TYPEC_PLUG_SOP_PP; i >= 0; --i) {
if (tbt->plug[i])
typec_altmode_put_plug(tbt->plug[i]);
}
@@ -351,10 +351,10 @@ static bool tbt_ready(struct typec_altmode *alt)
*/
for (int i = 0; i < TYPEC_PLUG_SOP_PP + 1; i++) {
plug = typec_altmode_get_plug(tbt->alt, i);
- if (IS_ERR(plug))
+ if (!plug)
continue;
- if (!plug || plug->svid != USB_TYPEC_TBT_SID)
+ if (plug->svid != USB_TYPEC_TBT_SID)
break;
plug->desc = "Thunderbolt3";
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 67381b4ef4f6..6dd8f961b593 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -56,6 +56,16 @@ config TYPEC_MUX_NB7VPQ904M
Say Y or M if your system has an On Semiconductor NB7VPQ904M Type-C
redriver chip found on some devices with a Type-C port.
+config TYPEC_MUX_PS883X
+ tristate "Parade PS883x Type-C retimer driver"
+ depends on I2C
+ depends on DRM || DRM=n
+ select DRM_AUX_BRIDGE if DRM_BRIDGE && OF
+ select REGMAP_I2C
+ help
+ Say Y or M if your system has a Parade PS883x Type-C retimer chip
+ found on some devices with a Type-C port.
+
config TYPEC_MUX_PTN36502
tristate "NXP PTN36502 Type-C redriver driver"
depends on I2C
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index 60879446da93..b4f599eb5053 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
obj-$(CONFIG_TYPEC_MUX_IT5205) += it5205.o
obj-$(CONFIG_TYPEC_MUX_NB7VPQ904M) += nb7vpq904m.o
+obj-$(CONFIG_TYPEC_MUX_PS883X) += ps883x.o
obj-$(CONFIG_TYPEC_MUX_PTN36502) += ptn36502.o
obj-$(CONFIG_TYPEC_MUX_TUSB1046) += tusb1046.o
obj-$(CONFIG_TYPEC_MUX_WCD939X_USBSS) += wcd939x-usbss.o
diff --git a/drivers/usb/typec/mux/ps883x.c b/drivers/usb/typec/mux/ps883x.c
new file mode 100644
index 000000000000..ad59babf7cce
--- /dev/null
+++ b/drivers/usb/typec/mux/ps883x.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Parade ps883x usb retimer driver
+ *
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <drm/bridge/aux-bridge.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
+
+#define REG_USB_PORT_CONN_STATUS_0 0x00
+
+#define CONN_STATUS_0_CONNECTION_PRESENT BIT(0)
+#define CONN_STATUS_0_ORIENTATION_REVERSED BIT(1)
+#define CONN_STATUS_0_USB_3_1_CONNECTED BIT(5)
+
+#define REG_USB_PORT_CONN_STATUS_1 0x01
+
+#define CONN_STATUS_1_DP_CONNECTED BIT(0)
+#define CONN_STATUS_1_DP_SINK_REQUESTED BIT(1)
+#define CONN_STATUS_1_DP_PIN_ASSIGNMENT_C_D BIT(2)
+#define CONN_STATUS_1_DP_HPD_LEVEL BIT(7)
+
+#define REG_USB_PORT_CONN_STATUS_2 0x02
+
+struct ps883x_retimer {
+ struct i2c_client *client;
+ struct gpio_desc *reset_gpio;
+ struct regmap *regmap;
+ struct typec_switch_dev *sw;
+ struct typec_retimer *retimer;
+ struct clk *xo_clk;
+ struct regulator *vdd_supply;
+ struct regulator *vdd33_supply;
+ struct regulator *vdd33_cap_supply;
+ struct regulator *vddat_supply;
+ struct regulator *vddar_supply;
+ struct regulator *vddio_supply;
+
+ struct typec_switch *typec_switch;
+ struct typec_mux *typec_mux;
+
+	struct mutex lock; /* protect against concurrent retimer & switch updates */
+
+ enum typec_orientation orientation;
+ unsigned long mode;
+ unsigned int svid;
+};
+
+static int ps883x_configure(struct ps883x_retimer *retimer, int cfg0,
+ int cfg1, int cfg2)
+{
+ struct device *dev = &retimer->client->dev;
+ int ret;
+
+ ret = regmap_write(retimer->regmap, REG_USB_PORT_CONN_STATUS_0, cfg0);
+ if (ret) {
+ dev_err(dev, "failed to write conn_status_0: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(retimer->regmap, REG_USB_PORT_CONN_STATUS_1, cfg1);
+ if (ret) {
+ dev_err(dev, "failed to write conn_status_1: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(retimer->regmap, REG_USB_PORT_CONN_STATUS_2, cfg2);
+ if (ret) {
+ dev_err(dev, "failed to write conn_status_2: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ps883x_set(struct ps883x_retimer *retimer)
+{
+ int cfg0 = CONN_STATUS_0_CONNECTION_PRESENT;
+ int cfg1 = 0x00;
+ int cfg2 = 0x00;
+
+ if (retimer->orientation == TYPEC_ORIENTATION_NONE ||
+ retimer->mode == TYPEC_STATE_SAFE) {
+ return ps883x_configure(retimer, cfg0, cfg1, cfg2);
+ }
+
+ if (retimer->mode != TYPEC_STATE_USB && retimer->svid != USB_TYPEC_DP_SID)
+ return -EINVAL;
+
+ if (retimer->orientation == TYPEC_ORIENTATION_REVERSE)
+ cfg0 |= CONN_STATUS_0_ORIENTATION_REVERSED;
+
+ switch (retimer->mode) {
+ case TYPEC_STATE_USB:
+ cfg0 |= CONN_STATUS_0_USB_3_1_CONNECTED;
+ break;
+
+ case TYPEC_DP_STATE_C:
+ cfg1 = CONN_STATUS_1_DP_CONNECTED |
+ CONN_STATUS_1_DP_SINK_REQUESTED |
+ CONN_STATUS_1_DP_PIN_ASSIGNMENT_C_D |
+ CONN_STATUS_1_DP_HPD_LEVEL;
+ break;
+
+ case TYPEC_DP_STATE_D:
+ cfg0 |= CONN_STATUS_0_USB_3_1_CONNECTED;
+ cfg1 = CONN_STATUS_1_DP_CONNECTED |
+ CONN_STATUS_1_DP_SINK_REQUESTED |
+ CONN_STATUS_1_DP_PIN_ASSIGNMENT_C_D |
+ CONN_STATUS_1_DP_HPD_LEVEL;
+ break;
+
+ case TYPEC_DP_STATE_E:
+ cfg1 = CONN_STATUS_1_DP_CONNECTED |
+ CONN_STATUS_1_DP_HPD_LEVEL;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ps883x_configure(retimer, cfg0, cfg1, cfg2);
+}
+
+static int ps883x_sw_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct ps883x_retimer *retimer = typec_switch_get_drvdata(sw);
+ int ret = 0;
+
+ ret = typec_switch_set(retimer->typec_switch, orientation);
+ if (ret)
+ return ret;
+
+ mutex_lock(&retimer->lock);
+
+ if (retimer->orientation != orientation) {
+ retimer->orientation = orientation;
+
+ ret = ps883x_set(retimer);
+ }
+
+ mutex_unlock(&retimer->lock);
+
+ return ret;
+}
+
+static int ps883x_retimer_set(struct typec_retimer *rtmr,
+ struct typec_retimer_state *state)
+{
+ struct ps883x_retimer *retimer = typec_retimer_get_drvdata(rtmr);
+ struct typec_mux_state mux_state;
+ int ret = 0;
+
+ mutex_lock(&retimer->lock);
+
+ if (state->mode != retimer->mode) {
+ retimer->mode = state->mode;
+
+ if (state->alt)
+ retimer->svid = state->alt->svid;
+ else
+ retimer->svid = 0;
+
+ ret = ps883x_set(retimer);
+ }
+
+ mutex_unlock(&retimer->lock);
+
+ if (ret)
+ return ret;
+
+ mux_state.alt = state->alt;
+ mux_state.data = state->data;
+ mux_state.mode = state->mode;
+
+ return typec_mux_set(retimer->typec_mux, &mux_state);
+}
+
+static int ps883x_enable_vregs(struct ps883x_retimer *retimer)
+{
+ struct device *dev = &retimer->client->dev;
+ int ret;
+
+ ret = regulator_enable(retimer->vdd33_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD 3.3V regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(retimer->vdd33_cap_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD 3.3V CAP regulator: %d\n", ret);
+ goto err_vdd33_disable;
+ }
+
+ usleep_range(4000, 10000);
+
+ ret = regulator_enable(retimer->vdd_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD regulator: %d\n", ret);
+ goto err_vdd33_cap_disable;
+ }
+
+ ret = regulator_enable(retimer->vddar_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD AR regulator: %d\n", ret);
+ goto err_vdd_disable;
+ }
+
+ ret = regulator_enable(retimer->vddat_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD AT regulator: %d\n", ret);
+ goto err_vddar_disable;
+ }
+
+ ret = regulator_enable(retimer->vddio_supply);
+ if (ret) {
+ dev_err(dev, "cannot enable VDD IO regulator: %d\n", ret);
+ goto err_vddat_disable;
+ }
+
+ return 0;
+
+err_vddat_disable:
+ regulator_disable(retimer->vddat_supply);
+err_vddar_disable:
+ regulator_disable(retimer->vddar_supply);
+err_vdd_disable:
+ regulator_disable(retimer->vdd_supply);
+err_vdd33_cap_disable:
+ regulator_disable(retimer->vdd33_cap_supply);
+err_vdd33_disable:
+ regulator_disable(retimer->vdd33_supply);
+
+ return ret;
+}
+
+static void ps883x_disable_vregs(struct ps883x_retimer *retimer)
+{
+ regulator_disable(retimer->vddio_supply);
+ regulator_disable(retimer->vddat_supply);
+ regulator_disable(retimer->vddar_supply);
+ regulator_disable(retimer->vdd_supply);
+ regulator_disable(retimer->vdd33_cap_supply);
+ regulator_disable(retimer->vdd33_supply);
+}
+
+static int ps883x_get_vregs(struct ps883x_retimer *retimer)
+{
+ struct device *dev = &retimer->client->dev;
+
+ retimer->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(retimer->vdd_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vdd_supply),
+ "failed to get VDD\n");
+
+ retimer->vdd33_supply = devm_regulator_get(dev, "vdd33");
+ if (IS_ERR(retimer->vdd33_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vdd33_supply),
+ "failed to get VDD 3.3V\n");
+
+ retimer->vdd33_cap_supply = devm_regulator_get(dev, "vdd33-cap");
+ if (IS_ERR(retimer->vdd33_cap_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vdd33_cap_supply),
+ "failed to get VDD CAP 3.3V\n");
+
+ retimer->vddat_supply = devm_regulator_get(dev, "vddat");
+ if (IS_ERR(retimer->vddat_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vddat_supply),
+ "failed to get VDD AT\n");
+
+ retimer->vddar_supply = devm_regulator_get(dev, "vddar");
+ if (IS_ERR(retimer->vddar_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vddar_supply),
+ "failed to get VDD AR\n");
+
+ retimer->vddio_supply = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(retimer->vddio_supply))
+ return dev_err_probe(dev, PTR_ERR(retimer->vddio_supply),
+ "failed to get VDD IO\n");
+
+ return 0;
+}
+
+static const struct regmap_config ps883x_retimer_regmap = {
+ .max_register = 0x1f,
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ps883x_retimer_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct typec_switch_desc sw_desc = { };
+ struct typec_retimer_desc rtmr_desc = { };
+ struct ps883x_retimer *retimer;
+ unsigned int val;
+ int ret;
+
+ retimer = devm_kzalloc(dev, sizeof(*retimer), GFP_KERNEL);
+ if (!retimer)
+ return -ENOMEM;
+
+ retimer->client = client;
+
+ mutex_init(&retimer->lock);
+
+ retimer->regmap = devm_regmap_init_i2c(client, &ps883x_retimer_regmap);
+ if (IS_ERR(retimer->regmap))
+ return dev_err_probe(dev, PTR_ERR(retimer->regmap),
+ "failed to allocate register map\n");
+
+ ret = ps883x_get_vregs(retimer);
+ if (ret)
+ return ret;
+
+ retimer->xo_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(retimer->xo_clk))
+ return dev_err_probe(dev, PTR_ERR(retimer->xo_clk),
+ "failed to get xo clock\n");
+
+ retimer->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(retimer->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(retimer->reset_gpio),
+ "failed to get reset gpio\n");
+
+ retimer->typec_switch = typec_switch_get(dev);
+ if (IS_ERR(retimer->typec_switch))
+ return dev_err_probe(dev, PTR_ERR(retimer->typec_switch),
+ "failed to acquire orientation-switch\n");
+
+ retimer->typec_mux = typec_mux_get(dev);
+ if (IS_ERR(retimer->typec_mux)) {
+ ret = dev_err_probe(dev, PTR_ERR(retimer->typec_mux),
+ "failed to acquire mode-mux\n");
+ goto err_switch_put;
+ }
+
+ ret = drm_aux_bridge_register(dev);
+ if (ret)
+ goto err_mux_put;
+
+ ret = ps883x_enable_vregs(retimer);
+ if (ret)
+ goto err_mux_put;
+
+ ret = clk_prepare_enable(retimer->xo_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable XO: %d\n", ret);
+ goto err_vregs_disable;
+ }
+
+ /* skip resetting if already configured */
+ if (regmap_test_bits(retimer->regmap, REG_USB_PORT_CONN_STATUS_0,
+ CONN_STATUS_0_CONNECTION_PRESENT) == 1) {
+ gpiod_direction_output(retimer->reset_gpio, 0);
+ } else {
+ gpiod_direction_output(retimer->reset_gpio, 1);
+
+ /* VDD IO supply enable to reset release delay */
+ usleep_range(4000, 14000);
+
+ gpiod_set_value(retimer->reset_gpio, 0);
+
+ /* firmware initialization delay */
+ msleep(60);
+
+ /* make sure device is accessible */
+ ret = regmap_read(retimer->regmap, REG_USB_PORT_CONN_STATUS_0,
+ &val);
+ if (ret) {
+ dev_err(dev, "failed to read conn_status_0: %d\n", ret);
+ if (ret == -ENXIO)
+ ret = -EIO;
+ goto err_clk_disable;
+ }
+ }
+
+ sw_desc.drvdata = retimer;
+ sw_desc.fwnode = dev_fwnode(dev);
+ sw_desc.set = ps883x_sw_set;
+
+ retimer->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(retimer->sw)) {
+ ret = PTR_ERR(retimer->sw);
+ dev_err(dev, "failed to register typec switch: %d\n", ret);
+ goto err_clk_disable;
+ }
+
+ rtmr_desc.drvdata = retimer;
+ rtmr_desc.fwnode = dev_fwnode(dev);
+ rtmr_desc.set = ps883x_retimer_set;
+
+ retimer->retimer = typec_retimer_register(dev, &rtmr_desc);
+ if (IS_ERR(retimer->retimer)) {
+ ret = PTR_ERR(retimer->retimer);
+ dev_err(dev, "failed to register typec retimer: %d\n", ret);
+ goto err_switch_unregister;
+ }
+
+ return 0;
+
+err_switch_unregister:
+ typec_switch_unregister(retimer->sw);
+err_clk_disable:
+ clk_disable_unprepare(retimer->xo_clk);
+err_vregs_disable:
+ gpiod_set_value(retimer->reset_gpio, 1);
+ ps883x_disable_vregs(retimer);
+err_mux_put:
+ typec_mux_put(retimer->typec_mux);
+err_switch_put:
+ typec_switch_put(retimer->typec_switch);
+
+ return ret;
+}
+
+static void ps883x_retimer_remove(struct i2c_client *client)
+{
+ struct ps883x_retimer *retimer = i2c_get_clientdata(client);
+
+ typec_retimer_unregister(retimer->retimer);
+ typec_switch_unregister(retimer->sw);
+
+ gpiod_set_value(retimer->reset_gpio, 1);
+
+ clk_disable_unprepare(retimer->xo_clk);
+
+ ps883x_disable_vregs(retimer);
+
+ typec_mux_put(retimer->typec_mux);
+ typec_switch_put(retimer->typec_switch);
+}
+
+static const struct of_device_id ps883x_retimer_of_table[] = {
+ { .compatible = "parade,ps8830" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ps883x_retimer_of_table);
+
+static struct i2c_driver ps883x_retimer_driver = {
+ .driver = {
+ .name = "ps883x_retimer",
+ .of_match_table = ps883x_retimer_of_table,
+ },
+ .probe = ps883x_retimer_probe,
+ .remove = ps883x_retimer_remove,
+};
+
+module_i2c_driver(ps883x_retimer_driver);
+
+MODULE_DESCRIPTION("Parade ps883x Type-C Retimer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/ucsi/cros_ec_ucsi.c b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
index c605c8616726..4ec1c6d22310 100644
--- a/drivers/usb/typec/ucsi/cros_ec_ucsi.c
+++ b/drivers/usb/typec/ucsi/cros_ec_ucsi.c
@@ -105,12 +105,13 @@ static int cros_ucsi_async_control(struct ucsi *ucsi, u64 cmd)
return 0;
}
-static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd)
+static int cros_ucsi_sync_control(struct ucsi *ucsi, u64 cmd, u32 *cci,
+ void *data, size_t size)
{
struct cros_ucsi_data *udata = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_sync_control_common(ucsi, cmd);
+ ret = ucsi_sync_control_common(ucsi, cmd, cci, data, size);
switch (ret) {
case -EBUSY:
/* EC may return -EBUSY if CCI.busy is set.
@@ -205,12 +206,19 @@ static int cros_ucsi_event(struct notifier_block *nb,
{
struct cros_ucsi_data *udata = container_of(nb, struct cros_ucsi_data, nb);
- if (!(host_event & PD_EVENT_PPM))
- return NOTIFY_OK;
+ if (host_event & PD_EVENT_INIT) {
+ /* Late init event received from ChromeOS EC. Treat this as a
+ * system resume to re-enable communication with the PPM.
+ */
+ dev_dbg(udata->dev, "Late PD init received\n");
+ ucsi_resume(udata->ucsi);
+ }
- dev_dbg(udata->dev, "UCSI notification received\n");
- flush_work(&udata->work);
- schedule_work(&udata->work);
+ if (host_event & PD_EVENT_PPM) {
+ dev_dbg(udata->dev, "UCSI notification received\n");
+ flush_work(&udata->work);
+ schedule_work(&udata->work);
+ }
return NOTIFY_OK;
}
diff --git a/drivers/usb/typec/ucsi/debugfs.c b/drivers/usb/typec/ucsi/debugfs.c
index 83ff23086d79..eae2b18a2d8a 100644
--- a/drivers/usb/typec/ucsi/debugfs.c
+++ b/drivers/usb/typec/ucsi/debugfs.c
@@ -28,11 +28,12 @@ static int ucsi_cmd(void *data, u64 val)
ucsi->debugfs->status = 0;
switch (UCSI_COMMAND(val)) {
- case UCSI_SET_UOM:
+ case UCSI_SET_CCOM:
case UCSI_SET_UOR:
case UCSI_SET_PDR:
case UCSI_CONNECTOR_RESET:
case UCSI_SET_SINK_PATH:
+ case UCSI_SET_NEW_CAM:
ret = ucsi_send_command(ucsi, val, NULL, 0);
break;
case UCSI_GET_CAPABILITY:
@@ -42,6 +43,9 @@ static int ucsi_cmd(void *data, u64 val)
case UCSI_GET_PDOS:
case UCSI_GET_CABLE_PROPERTY:
case UCSI_GET_CONNECTOR_STATUS:
+ case UCSI_GET_ERROR_STATUS:
+ case UCSI_GET_CAM_CS:
+ case UCSI_GET_LPM_PPM_INFO:
ret = ucsi_send_command(ucsi, val,
&ucsi->debugfs->response,
sizeof(ucsi->debugfs->response));
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index cb62ad835761..596a9542d401 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -12,7 +12,7 @@ static const char * const ucsi_cmd_strs[] = {
[UCSI_SET_NOTIFICATION_ENABLE] = "SET_NOTIFICATION_ENABLE",
[UCSI_GET_CAPABILITY] = "GET_CAPABILITY",
[UCSI_GET_CONNECTOR_CAPABILITY] = "GET_CONNECTOR_CAPABILITY",
- [UCSI_SET_UOM] = "SET_UOM",
+ [UCSI_SET_CCOM] = "SET_CCOM",
[UCSI_SET_UOR] = "SET_UOR",
[UCSI_SET_PDM] = "SET_PDM",
[UCSI_SET_PDR] = "SET_PDR",
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 2a2915b0a645..e8c7e9dc4930 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -55,7 +55,8 @@ void ucsi_notify_common(struct ucsi *ucsi, u32 cci)
}
EXPORT_SYMBOL_GPL(ucsi_notify_common);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command)
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
bool ack = UCSI_COMMAND(command) == UCSI_ACK_CC_CI;
int ret;
@@ -80,6 +81,13 @@ out_clear_bit:
else
clear_bit(COMMAND_PENDING, &ucsi->flags);
+ if (!ret && cci)
+ ret = ucsi->ops->read_cci(ucsi, cci);
+
+ if (!ret && data &&
+ (*cci & UCSI_CCI_COMMAND_COMPLETE))
+ ret = ucsi->ops->read_message_in(ucsi, data, size);
+
return ret;
}
EXPORT_SYMBOL_GPL(ucsi_sync_control_common);
@@ -95,7 +103,7 @@ static int ucsi_acknowledge(struct ucsi *ucsi, bool conn_ack)
ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
}
- return ucsi->ops->sync_control(ucsi, ctrl);
+ return ucsi->ops->sync_control(ucsi, ctrl, NULL, NULL, 0);
}
static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
@@ -108,9 +116,7 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
if (size > UCSI_MAX_DATA_LENGTH(ucsi))
return -EINVAL;
- ret = ucsi->ops->sync_control(ucsi, command);
- if (ucsi->ops->read_cci(ucsi, cci))
- return -EIO;
+ ret = ucsi->ops->sync_control(ucsi, command, cci, data, size);
if (*cci & UCSI_CCI_BUSY)
return ucsi_run_command(ucsi, UCSI_CANCEL, cci, NULL, 0, false) ?: -EBUSY;
@@ -127,9 +133,6 @@ static int ucsi_run_command(struct ucsi *ucsi, u64 command, u32 *cci,
else
err = 0;
- if (!err && data && UCSI_CCI_LENGTH(*cci))
- err = ucsi->ops->read_message_in(ucsi, data, size);
-
/*
* Don't ACK connection change if there was an error.
*/
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 28780acc4af2..3a2c1762bec1 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -79,7 +79,8 @@ struct ucsi_operations {
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
- int (*sync_control)(struct ucsi *ucsi, u64 command);
+ int (*sync_control)(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size);
int (*async_control)(struct ucsi *ucsi, u64 command);
bool (*update_altmodes)(struct ucsi *ucsi, struct ucsi_altmode *orig,
struct ucsi_altmode *updated);
@@ -108,7 +109,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_CAPABILITY_SIZE 128
#define UCSI_GET_CONNECTOR_CAPABILITY 0x07
#define UCSI_GET_CONNECTOR_CAPABILITY_SIZE 32
-#define UCSI_SET_UOM 0x08
+#define UCSI_SET_CCOM 0x08
#define UCSI_SET_UOR 0x09
#define UCSI_SET_PDM 0x0a
#define UCSI_SET_PDR 0x0b
@@ -123,7 +124,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_CONNECTOR_STATUS_SIZE 152
#define UCSI_GET_ERROR_STATUS 0x13
#define UCSI_GET_PD_MESSAGE 0x15
+#define UCSI_GET_CAM_CS 0x18
#define UCSI_SET_SINK_PATH 0x1c
+#define UCSI_GET_LPM_PPM_INFO 0x22
#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
#define UCSI_COMMAND(_cmd_) ((_cmd_) & 0xff)
@@ -531,7 +534,8 @@ void ucsi_altmode_update_active(struct ucsi_connector *con);
int ucsi_resume(struct ucsi *ucsi);
void ucsi_notify_common(struct ucsi *ucsi, u32 cci);
-int ucsi_sync_control_common(struct ucsi *ucsi, u64 command);
+int ucsi_sync_control_common(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
int ucsi_register_port_psy(struct ucsi_connector *con);
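
The header additions (SET_CCOM replacing the old SET_UOM name, plus GET_CAM_CS and GET_LPM_PPM_INFO) back the new debugfs cases above. Per the macros in this hunk, a control word carries the opcode in its low byte and the connector number from bit 16; the tiny standalone illustration below composes and decodes one using the GET_CAM_CS opcode from the hunk (the decode mask width is illustrative).

#include <stdint.h>
#include <stdio.h>

#define CMD_GET_CAM_CS       0x18			/* opcode value from the hunk */
#define CONNECTOR_NUMBER(n)  ((uint64_t)(n) << 16)	/* like UCSI_CONNECTOR_NUMBER() */
#define COMMAND(c)           ((c) & 0xff)		/* like UCSI_COMMAND() */

int main(void)
{
	/* Target connector 2: opcode in the low byte, connector index at bit 16. */
	uint64_t ctrl = CMD_GET_CAM_CS | CONNECTOR_NUMBER(2);

	printf("opcode=0x%02x connector=%u\n",
	       (unsigned)COMMAND(ctrl), (unsigned)((ctrl >> 16) & 0xff));
	return 0;
}
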
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index ac1ebb5d9527..6b92f296e985 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -105,17 +105,23 @@ static const struct ucsi_operations ucsi_acpi_ops = {
.async_control = ucsi_acpi_async_control
};
-static int ucsi_gram_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
+static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *val, size_t len)
{
u16 bogus_change = UCSI_CONSTAT_POWER_LEVEL_CHANGE |
UCSI_CONSTAT_PDOS_CHANGE;
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
int ret;
- ret = ucsi_acpi_read_message_in(ucsi, val, val_len);
+ ret = ucsi_sync_control_common(ucsi, command, cci, val, len);
if (ret < 0)
return ret;
+ if (UCSI_COMMAND(ua->cmd) == UCSI_GET_PDOS &&
+ ua->cmd & UCSI_GET_PDOS_PARTNER_PDO(1) &&
+ ua->cmd & UCSI_GET_PDOS_SRC_PDOS)
+ ua->check_bogus_event = true;
+
if (UCSI_COMMAND(ua->cmd) == UCSI_GET_CONNECTOR_STATUS &&
ua->check_bogus_event) {
/* Clear the bogus change */
@@ -128,28 +134,11 @@ static int ucsi_gram_read_message_in(struct ucsi *ucsi, void *val, size_t val_le
return ret;
}
-static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command)
-{
- struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- int ret;
-
- ret = ucsi_sync_control_common(ucsi, command);
- if (ret < 0)
- return ret;
-
- if (UCSI_COMMAND(ua->cmd) == UCSI_GET_PDOS &&
- ua->cmd & UCSI_GET_PDOS_PARTNER_PDO(1) &&
- ua->cmd & UCSI_GET_PDOS_SRC_PDOS)
- ua->check_bogus_event = true;
-
- return ret;
-}
-
static const struct ucsi_operations ucsi_gram_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
.poll_cci = ucsi_acpi_poll_cci,
- .read_message_in = ucsi_gram_read_message_in,
+ .read_message_in = ucsi_acpi_read_message_in,
.sync_control = ucsi_gram_sync_control,
.async_control = ucsi_acpi_async_control
};
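
Since sync_control now receives the response buffer, the Gram quirk handling that was split across a custom read_message_in and a custom sync_control collapses into a single wrapper: call the common helper first, then patch the returned data. A condensed sketch of that wrap-then-post-process shape follows; the quirk condition and buffer contents are simplified stand-ins for the bogus-change fixup.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Pretend common implementation: fills the response and reports success. */
static int sync_control_common(uint64_t cmd, void *buf, size_t len)
{
	(void)cmd;
	memset(buf, 0xff, len);
	return 0;
}

/* Glue-driver wrapper, in the same order ucsi_gram_sync_control() now uses:
 * run the shared path, then scrub quirky bits out of the response. */
static int gram_sync_control(uint64_t cmd, void *buf, size_t len)
{
	int ret = sync_control_common(cmd, buf, len);

	if (ret < 0)
		return ret;
	if (len)					/* stand-in for the fixup */
		((uint8_t *)buf)[0] &= ~0x01;
	return ret;
}

int main(void)
{
	uint8_t resp[4];

	printf("ret=%d first=0x%02x\n",
	       gram_sync_control(0x12, resp, sizeof(resp)), resp[0]);
	return 0;
}
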
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 4b1668733a4b..f01e4ef6619d 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -222,7 +222,6 @@ struct ucsi_ccg {
u16 fw_build;
struct work_struct pm_work;
- u64 last_cmd_sent;
bool has_multiple_dp;
struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
@@ -538,9 +537,10 @@ static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
* first and then vdo=0x3
*/
static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
- struct ucsi_altmode *alt)
+ struct ucsi_altmode *alt,
+ u64 command)
{
- switch (UCSI_ALTMODE_OFFSET(uc->last_cmd_sent)) {
+ switch (UCSI_ALTMODE_OFFSET(command)) {
case NVIDIA_FTB_DP_OFFSET:
if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
@@ -578,37 +578,11 @@ static int ucsi_ccg_read_cci(struct ucsi *ucsi, u32 *cci)
static int ucsi_ccg_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
- struct ucsi_capability *cap;
- struct ucsi_altmode *alt;
spin_lock(&uc->op_lock);
memcpy(val, uc->op_data.message_in, val_len);
spin_unlock(&uc->op_lock);
- switch (UCSI_COMMAND(uc->last_cmd_sent)) {
- case UCSI_GET_CURRENT_CAM:
- if (uc->has_multiple_dp)
- ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)val);
- break;
- case UCSI_GET_ALTERNATE_MODES:
- if (UCSI_ALTMODE_RECIPIENT(uc->last_cmd_sent) ==
- UCSI_RECIPIENT_SOP) {
- alt = val;
- if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
- ucsi_ccg_nvidia_altmode(uc, alt);
- }
- break;
- case UCSI_GET_CAPABILITY:
- if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
- cap = val;
- cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
- }
- break;
- default:
- break;
- }
- uc->last_cmd_sent = 0;
-
return 0;
}
@@ -628,7 +602,8 @@ static int ucsi_ccg_async_control(struct ucsi *ucsi, u64 command)
return ccg_write(uc, reg, (u8 *)&command, sizeof(command));
}
-static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
+static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
+ void *data, size_t size)
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
struct ucsi_connector *con;
@@ -638,11 +613,9 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
mutex_lock(&uc->lock);
pm_runtime_get_sync(uc->dev);
- uc->last_cmd_sent = command;
-
- if (UCSI_COMMAND(uc->last_cmd_sent) == UCSI_SET_NEW_CAM &&
+ if (UCSI_COMMAND(command) == UCSI_SET_NEW_CAM &&
uc->has_multiple_dp) {
- con_index = (uc->last_cmd_sent >> 16) &
+ con_index = (command >> 16) &
UCSI_CMD_CONNECTOR_MASK;
if (con_index == 0) {
ret = -EINVAL;
@@ -652,7 +625,31 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command)
ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
}
- ret = ucsi_sync_control_common(ucsi, command);
+ ret = ucsi_sync_control_common(ucsi, command, cci, data, size);
+
+ switch (UCSI_COMMAND(command)) {
+ case UCSI_GET_CURRENT_CAM:
+ if (uc->has_multiple_dp)
+ ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)data);
+ break;
+ case UCSI_GET_ALTERNATE_MODES:
+ if (UCSI_ALTMODE_RECIPIENT(command) == UCSI_RECIPIENT_SOP) {
+ struct ucsi_altmode *alt = data;
+
+ if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
+ ucsi_ccg_nvidia_altmode(uc, alt, command);
+ }
+ break;
+ case UCSI_GET_CAPABILITY:
+ if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
+ struct ucsi_capability *cap = data;
+
+ cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
+ }
+ break;
+ default:
+ break;
+ }
err_put:
pm_runtime_put_sync(uc->dev);
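
Dropping uc->last_cmd_sent removes shared state between the control and read paths: the CCG driver now post-processes GET_CURRENT_CAM, GET_ALTERNATE_MODES and GET_CAPABILITY responses directly in sync_control(), where the command word is still in scope, instead of keying off a cached copy that another caller could overwrite. The small sketch below post-processes a response keyed on the command just issued; the opcode value is an inference from the header hunk (GET_CONNECTOR_CAPABILITY is 0x07 there) and the feature bit is purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define CMD_GET_CAPABILITY	0x06		/* assumed; 0x07 is GET_CONNECTOR_CAPABILITY */
#define CAP_ALT_MODE_DETAILS	(1u << 1)	/* illustrative feature bit */

struct capability { uint32_t features; };

/* Key the fixup on the command that was just issued rather than on a cached
 * "last command sent" that a concurrent caller could have clobbered. */
static void fixup_response(uint64_t command, void *data)
{
	switch (command & 0xff) {
	case CMD_GET_CAPABILITY: {
		struct capability *cap = data;

		cap->features &= ~CAP_ALT_MODE_DETAILS;	/* hide unsupported details */
		break;
	}
	default:
		break;
	}
}

int main(void)
{
	struct capability cap = { .features = 0xff };

	fixup_response(CMD_GET_CAPABILITY, &cap);
	printf("features=0x%x\n", (unsigned)cap.features);
	return 0;
}
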
@@ -1391,22 +1388,35 @@ static ssize_t do_flash_store(struct device *dev,
if (!flash)
return n;
- if (uc->fw_build == 0x0) {
- dev_err(dev, "fail to flash FW due to missing FW build info\n");
- return -EINVAL;
- }
-
schedule_work(&uc->work);
return n;
}
+static umode_t ucsi_ccg_attrs_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
+
+ if (!uc->fw_build)
+ return 0;
+
+ return attr->mode;
+}
+
static DEVICE_ATTR_WO(do_flash);
static struct attribute *ucsi_ccg_attrs[] = {
&dev_attr_do_flash.attr,
NULL,
};
-ATTRIBUTE_GROUPS(ucsi_ccg);
+static struct attribute_group ucsi_ccg_attr_group = {
+ .attrs = ucsi_ccg_attrs,
+ .is_visible = ucsi_ccg_attrs_is_visible,
+};
+static const struct attribute_group *ucsi_ccg_groups[] = {
+ &ucsi_ccg_attr_group,
+ NULL,
+};
static int ucsi_ccg_probe(struct i2c_client *client)
{
@@ -1433,11 +1443,10 @@ static int ucsi_ccg_probe(struct i2c_client *client)
uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
else if (!strcmp(fw_name, "nvidia,gpu"))
uc->fw_build = CCG_FW_BUILD_NVIDIA;
+ if (!uc->fw_build)
+ dev_err(uc->dev, "failed to get FW build information\n");
}
- if (!uc->fw_build)
- dev_err(uc->dev, "failed to get FW build information\n");
-
/* reset ccg device and initialize ucsi */
status = ucsi_ccg_init(uc);
if (status < 0) {
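
The do_flash attribute also changes from failing at write time to never being created: the group's is_visible() hook returns 0 when no firmware build information was found, and the attribute's normal mode otherwise. Here is a toy model of that visibility pattern with stand-in types instead of the real kobject/attribute machinery.

#include <stdio.h>

struct attribute { const char *name; unsigned int mode; };
struct device { int fw_build; };

/* Returning 0 hides the attribute entirely; returning attr->mode keeps its
 * usual permissions - the same contract as ucsi_ccg_attrs_is_visible(). */
static unsigned int is_visible(const struct device *dev, const struct attribute *attr)
{
	if (!dev->fw_build)
		return 0;
	return attr->mode;
}

int main(void)
{
	struct attribute do_flash = { "do_flash", 0200 };
	struct device with_fw = { .fw_build = 1 }, without_fw = { .fw_build = 0 };

	printf("with fw: mode=%o\n", is_visible(&with_fw, &do_flash));
	printf("without fw: hidden=%d\n", is_visible(&without_fw, &do_flash) == 0);
	return 0;
}
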
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index cc2007be2173..5b5fda617b80 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -407,8 +407,8 @@ static struct dentry *v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
err);
goto error;
}
- v9fs_fid_add(dentry, &fid);
v9fs_set_create_acl(inode, fid, dacl, pacl);
+ v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
err = 0;
inc_nlink(dir);
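
The 9p hunk only swaps two calls, presumably because v9fs_fid_add() takes the fid by address and consumes it, so the creation ACLs have to be set while the local fid pointer is still valid. Below is a generic use-before-ownership-transfer sketch; the helper names and the free() standing in for the reference hand-off are illustrative, not the 9p implementation.

#include <stdio.h>
#include <stdlib.h>

struct fid { int id; };

/* Takes ownership: after this call the caller's pointer is cleared and must
 * not be used again - roughly analogous to v9fs_fid_add(dentry, &fid). */
static void fid_add(struct fid **fid)
{
	free(*fid);
	*fid = NULL;
}

static void set_create_acl(const struct fid *fid) { printf("ACL via fid %d\n", fid->id); }

int main(void)
{
	struct fid *fid = malloc(sizeof(*fid));

	if (!fid)
		return 1;
	fid->id = 7;
	set_create_acl(fid);	/* correct order: use the fid first ... */
	fid_add(&fid);		/* ... then hand ownership away */
	return 0;
}
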
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index c9798750202d..bf1c94e51dd0 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -26,6 +26,7 @@ config BCACHEFS_FS
select SRCU
select SYMBOLIC_ERRNAME
select MIN_HEAP
+ select XARRAY_MULTI
help
The bcachefs filesystem - a modern, copy on write filesystem, with
support for multiple devices, compression, checksumming, etc.
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 99487727ae64..d03adc36100e 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -273,7 +273,7 @@ struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct posix_acl *acl = NULL;
if (rcu)
@@ -344,7 +344,7 @@ int bch2_set_acl(struct mnt_idmap *idmap,
{
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
struct bch_inode_unpacked inode_u;
struct posix_acl *acl;
umode_t mode;
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index c12ca7538e4f..94ea9e49aec4 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -610,7 +610,7 @@ int bch2_alloc_read(struct bch_fs *c)
* bch2_check_alloc_key() which runs later:
*/
if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
@@ -631,17 +631,17 @@ int bch2_alloc_read(struct bch_fs *c)
* bch2_check_alloc_key() which runs later:
*/
if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
if (k.k->p.offset < ca->mi.first_bucket) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket));
continue;
}
if (k.k->p.offset >= ca->mi.nbuckets) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
@@ -1039,9 +1039,10 @@ invalid_bucket:
* This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
* extents style btrees, but works on non-extents btrees:
*/
-static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end, struct bkey *hole)
{
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
if (bkey_err(k))
return k;
@@ -1052,9 +1053,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
struct btree_iter iter2;
struct bpos next;
- bch2_trans_copy_iter(&iter2, iter);
+ bch2_trans_copy_iter(trans, &iter2, iter);
- struct btree_path *path = btree_iter_path(iter->trans, iter);
+ struct btree_path *path = btree_iter_path(trans, iter);
if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
@@ -1064,9 +1065,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
* btree node min/max is a closed interval, upto takes a half
* open interval:
*/
- k = bch2_btree_iter_peek_max(&iter2, end);
+ k = bch2_btree_iter_peek_max(trans, &iter2, end);
next = iter2.pos;
- bch2_trans_iter_exit(iter->trans, &iter2);
+ bch2_trans_iter_exit(trans, &iter2);
BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
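
As the comment above notes, bch2_get_key_or_hole() synthesizes a key for the gap when the slot is empty, sized up to the next real key or the end of the node, so callers can walk the alloc btree as if every position were populated; the change in this hunk is only the mechanical switch to passing the transaction explicitly. A toy version of the hole-synthesis idea over a sorted array, independent of the btree machinery:

#include <stdio.h>

struct key { unsigned long pos; int is_hole; unsigned long size; };

/* Given sorted existing positions, return either the key at 'pos' or a
 * synthetic hole covering [pos, next existing key), in the spirit of
 * bch2_get_key_or_hole(). */
static struct key key_or_hole(const unsigned long *existing, int n,
			      unsigned long pos, unsigned long end)
{
	for (int i = 0; i < n; i++) {
		if (existing[i] == pos)
			return (struct key){ pos, 0, 1 };
		if (existing[i] > pos)
			return (struct key){ pos, 1, existing[i] - pos };
	}
	return (struct key){ pos, 1, end - pos };
}

int main(void)
{
	unsigned long keys[] = { 2, 7 };
	struct key k = key_or_hole(keys, 2, 3, 16);

	printf("pos=%lu hole=%d size=%lu\n", k.pos, k.is_hole, k.size);	/* 3 1 4 */
	return 0;
}
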
@@ -1107,13 +1108,14 @@ static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *buck
return *ca != NULL;
}
-static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
- struct bch_dev **ca, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bch_dev **ca, struct bkey *hole)
{
- struct bch_fs *c = iter->trans->c;
+ struct bch_fs *c = trans->c;
struct bkey_s_c k;
again:
- k = bch2_get_key_or_hole(iter, POS_MAX, hole);
+ k = bch2_get_key_or_hole(trans, iter, POS_MAX, hole);
if (bkey_err(k))
return k;
@@ -1126,7 +1128,7 @@ again:
if (!next_bucket(c, ca, &hole_start))
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter, hole_start);
+ bch2_btree_iter_set_pos(trans, iter, hole_start);
goto again;
}
@@ -1167,8 +1169,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
a = bch2_alloc_to_v4(alloc_k, &a_convert);
- bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
- k = bch2_btree_iter_peek_slot(discard_iter);
+ bch2_btree_iter_set_pos(trans, discard_iter, alloc_k.k->p);
+ k = bch2_btree_iter_peek_slot(trans, discard_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1181,8 +1183,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
goto err;
}
- bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
- k = bch2_btree_iter_peek_slot(freespace_iter);
+ bch2_btree_iter_set_pos(trans, freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
+ k = bch2_btree_iter_peek_slot(trans, freespace_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1195,8 +1197,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
goto err;
}
- bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
- k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+ bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
+ k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1249,9 +1251,9 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
if (!ca->mi.freespace_initialized)
return 0;
- bch2_btree_iter_set_pos(freespace_iter, start);
+ bch2_btree_iter_set_pos(trans, freespace_iter, start);
- k = bch2_btree_iter_peek_slot(freespace_iter);
+ k = bch2_btree_iter_peek_slot(trans, freespace_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1300,9 +1302,9 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
unsigned i, gens_offset, gens_end_offset;
int ret;
- bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
+ bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
- k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+ k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1435,7 +1437,7 @@ int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_ite
*gen = a->gen;
out:
fsck_err:
- bch2_set_btree_iter_dontneed(&alloc_iter);
+ bch2_set_btree_iter_dontneed(trans, &alloc_iter);
bch2_trans_iter_exit(trans, &alloc_iter);
printbuf_exit(&buf);
return ret;
@@ -1572,7 +1574,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
bch2_trans_begin(trans);
- k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
+ k = bch2_get_key_or_real_bucket_hole(trans, &iter, &ca, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;
@@ -1610,7 +1612,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
if (ret)
goto bkey_err;
- bch2_btree_iter_set_pos(&iter, next);
+ bch2_btree_iter_set_pos(trans, &iter, next);
bkey_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
@@ -1638,7 +1640,7 @@ bkey_err:
BTREE_ITER_prefetch);
while (1) {
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek(trans, &iter);
if (!k.k)
break;
@@ -1657,7 +1659,7 @@ bkey_err:
break;
}
- bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
+ bch2_btree_iter_set_pos(trans, &iter, bpos_nosnap_successor(iter.pos));
}
bch2_trans_iter_exit(trans, &iter);
if (ret)
@@ -1685,7 +1687,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
struct printbuf buf = PRINTBUF;
int ret;
- alloc_k = bch2_btree_iter_peek(alloc_iter);
+ alloc_k = bch2_btree_iter_peek(trans, alloc_iter);
if (!alloc_k.k)
return 0;
@@ -1826,7 +1828,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct bpos pos = need_discard_iter->pos;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bkey_s_c k;
struct bkey_i_alloc_v4 *a;
struct printbuf buf = PRINTBUF;
@@ -1950,7 +1952,7 @@ static void bch2_do_discards_work(struct work_struct *work)
trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
bch2_err_str(ret));
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
@@ -1967,7 +1969,7 @@ void bch2_dev_do_discards(struct bch_dev *ca)
if (queue_work(c->write_ref_wq, &ca->discard_work))
return;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
put_write_ref:
bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
@@ -2045,7 +2047,7 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));
bch2_trans_put(trans);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
@@ -2065,7 +2067,7 @@ static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
return;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
put_ref:
bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
@@ -2082,6 +2084,9 @@ static int invalidate_one_bp(struct btree_trans *trans,
if (ret)
return ret;
+ if (!extent_k.k)
+ return 0;
+
struct bkey_i *n =
bch2_bkey_make_mut(trans, &extent_iter, &extent_k,
BTREE_UPDATE_internal_snapshot_node);
@@ -2199,9 +2204,9 @@ static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter
{
struct bkey_s_c k;
again:
- k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
+ k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
if (!k.k && !*wrapped) {
- bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
+ bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0));
*wrapped = true;
goto again;
}
@@ -2251,12 +2256,12 @@ restart_err:
if (ret)
break;
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
}
bch2_trans_iter_exit(trans, &iter);
err:
bch2_trans_put(trans);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
bch2_bkey_buf_exit(&last_flushed, c);
bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
@@ -2274,7 +2279,7 @@ void bch2_dev_do_invalidates(struct bch_dev *ca)
if (queue_work(c->write_ref_wq, &ca->invalidate_work))
return;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
put_ref:
bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
@@ -2321,7 +2326,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
break;
}
- k = bch2_get_key_or_hole(&iter, end, &hole);
+ k = bch2_get_key_or_hole(trans, &iter, end, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;
@@ -2340,7 +2345,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
if (ret)
goto bkey_err;
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
} else {
struct bkey_i *freespace;
@@ -2360,7 +2365,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
if (ret)
goto bkey_err;
- bch2_btree_iter_set_pos(&iter, k.k->p);
+ bch2_btree_iter_set_pos(trans, &iter, k.k->p);
}
bkey_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -2506,7 +2511,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
bch2_set_ra_pages(c, ra_pages);
- for_each_rw_member(c, ca) {
+ __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) {
u64 dev_reserve = 0;
/*
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index c556ccaffe89..34b3d6ac4fbb 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -321,11 +321,11 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
{
u64 want_free = ca->mi.nbuckets >> 7;
u64 free = max_t(s64, 0,
- u.d[BCH_DATA_free].buckets
- + u.d[BCH_DATA_need_discard].buckets
+ u.buckets[BCH_DATA_free]
+ + u.buckets[BCH_DATA_need_discard]
- bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));
- return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
+ return clamp_t(s64, want_free - free, 0, u.buckets[BCH_DATA_cached]);
}
void bch2_dev_do_invalidates(struct bch_dev *);
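
Only the usage layout changes here; the invalidation heuristic itself stays the same: aim to keep about nbuckets/128 buckets free, count free plus need_discard buckets minus the stripe reserve as available, and make up any shortfall by invalidating cached buckets, clamped to the number of cached buckets that exist. The same arithmetic as a standalone function:

#include <stdio.h>
#include <stdint.h>

static int64_t clamp64(int64_t v, int64_t lo, int64_t hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* want_free = nbuckets / 128; the shortfall is covered by invalidating cached
 * buckets, never more than are actually cached. */
static uint64_t buckets_to_invalidate(uint64_t nbuckets, uint64_t free,
				      uint64_t need_discard, uint64_t cached,
				      uint64_t reserved)
{
	uint64_t want_free = nbuckets >> 7;
	int64_t avail = (int64_t)(free + need_discard) - (int64_t)reserved;

	if (avail < 0)
		avail = 0;
	return clamp64((int64_t)want_free - avail, 0, (int64_t)cached);
}

int main(void)
{
	/* 1M buckets -> want 8192 free; 5000 usable -> invalidate 3192. */
	printf("%llu\n", (unsigned long long)
	       buckets_to_invalidate(1 << 20, 4000, 1500, 100000, 500));
	return 0;
}
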
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index da0d72928b5b..7c930ef77380 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -327,7 +327,7 @@ again:
bucket = sector_to_bucket(ca,
round_up(bucket_to_sector(ca, bucket) + 1,
1ULL << ca->mi.btree_bitmap_shift));
- bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
+ bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, bucket));
s->buckets_seen++;
s->skipped_mi_btree_bitmap++;
continue;
@@ -355,7 +355,7 @@ again:
watermark, s, cl)
: NULL;
next:
- bch2_set_btree_iter_dontneed(&citer);
+ bch2_set_btree_iter_dontneed(trans, &citer);
bch2_trans_iter_exit(trans, &citer);
if (ob)
break;
@@ -417,7 +417,7 @@ again:
1ULL << ca->mi.btree_bitmap_shift));
alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56));
- bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
+ bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, alloc_cursor));
s->skipped_mi_btree_bitmap++;
goto next;
}
@@ -426,7 +426,7 @@ again:
if (ob) {
if (!IS_ERR(ob))
*dev_alloc_cursor = iter.pos.offset;
- bch2_set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(trans, &iter);
break;
}
@@ -469,7 +469,7 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]);
prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]);
prt_printf(&buf, "blocking\t%u\n", cl != NULL);
- prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets);
+ prt_printf(&buf, "free\t%llu\n", usage->buckets[BCH_DATA_free]);
prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark));
prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
bch2_copygc_wait_amount(c),
@@ -524,10 +524,10 @@ again:
bch2_dev_usage_read_fast(ca, usage);
avail = dev_buckets_free(ca, *usage, watermark);
- if (usage->d[BCH_DATA_need_discard].buckets > avail)
+ if (usage->buckets[BCH_DATA_need_discard] > avail)
bch2_dev_do_discards(ca);
- if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
+ if (usage->buckets[BCH_DATA_need_gc_gens] > avail)
bch2_gc_gens_async(c);
if (should_invalidate_buckets(ca, *usage))
@@ -606,8 +606,7 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
unsigned l, unsigned r)
{
- return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
- (stripe->next_alloc[l] < stripe->next_alloc[r]));
+ return cmp_int(stripe->next_alloc[l], stripe->next_alloc[r]);
}
#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
@@ -626,25 +625,62 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
return ret;
}
+static const u64 stripe_clock_hand_rescale = 1ULL << 62; /* trigger rescale at */
+static const u64 stripe_clock_hand_max = 1ULL << 56; /* max after rescale */
+static const u64 stripe_clock_hand_inv = 1ULL << 52; /* max increment, if a device is empty */
+
+static noinline void bch2_stripe_state_rescale(struct dev_stripe_state *stripe)
+{
+ /*
+ * Avoid underflowing the clock hands if at all possible: if a hand goes
+ * to 0 we lose information. Clock hands can end up spanning a wide range
+ * when there are devices we rarely allocate from, e.g. if we usually
+ * allocate from a specific target and only occasionally fall back to
+ * the whole filesystem.
+ */
+ u64 scale_max = U64_MAX; /* maximum we can subtract without underflow */
+ u64 scale_min = 0; /* minimum we must subtract to avoid overflow */
+
+ for (u64 *v = stripe->next_alloc;
+ v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++) {
+ if (*v)
+ scale_max = min(scale_max, *v);
+ if (*v > stripe_clock_hand_max)
+ scale_min = max(scale_min, *v - stripe_clock_hand_max);
+ }
+
+ u64 scale = max(scale_min, scale_max);
+
+ for (u64 *v = stripe->next_alloc;
+ v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
+ *v = *v < scale ? 0 : *v - scale;
+}
+
static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
struct dev_stripe_state *stripe,
struct bch_dev_usage *usage)
{
+ /*
+ * Stripe state has a per device clock hand: we allocate from the device
+ * with the smallest clock hand.
+ *
+ * When we allocate, we don't do a simple increment; we add the inverse
+ * of the device's free space. This results in round robin behavior that
+ * biases in favor of the device(s) with more free space.
+ */
+
u64 *v = stripe->next_alloc + ca->dev_idx;
u64 free_space = __dev_buckets_available(ca, *usage, BCH_WATERMARK_normal);
u64 free_space_inv = free_space
- ? div64_u64(1ULL << 48, free_space)
- : 1ULL << 48;
- u64 scale = *v / 4;
+ ? div64_u64(stripe_clock_hand_inv, free_space)
+ : stripe_clock_hand_inv;
- if (*v + free_space_inv >= *v)
- *v += free_space_inv;
- else
- *v = U64_MAX;
+ /* Saturating add, avoid overflow: */
+ u64 sum = *v + free_space_inv;
+ *v = sum >= *v ? sum : U64_MAX;
- for (v = stripe->next_alloc;
- v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
- *v = *v < scale ? 0 : *v - scale;
+ if (unlikely(*v > stripe_clock_hand_rescale))
+ bch2_stripe_state_rescale(stripe);
}
void bch2_dev_stripe_increment(struct bch_dev *ca,
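
The increment path replaces the old scheme (add the free-space inverse, then knock a quarter off every hand) with a saturating add plus an occasional rescale: once the incremented hand passes 2^62, subtract as much as possible without zeroing a nonzero hand while still forcing every hand under 2^56, as the new constants and bch2_stripe_state_rescale() describe. A compact standalone model of the saturating add and the rescale bounds:

#include <stdio.h>
#include <stdint.h>

#define RESCALE_AT	(1ULL << 62)	/* trigger rescale at */
#define HAND_MAX	(1ULL << 56)	/* max after rescale */
#define INV_MAX		(1ULL << 52)	/* max increment for an empty device */

static void rescale(uint64_t *hand, int n)
{
	uint64_t scale_max = UINT64_MAX;	/* most we can subtract without zeroing a hand */
	uint64_t scale_min = 0;			/* least we must subtract to get under HAND_MAX */

	for (int i = 0; i < n; i++) {
		if (hand[i] && hand[i] < scale_max)
			scale_max = hand[i];
		if (hand[i] > HAND_MAX && hand[i] - HAND_MAX > scale_min)
			scale_min = hand[i] - HAND_MAX;
	}

	uint64_t scale = scale_min > scale_max ? scale_min : scale_max;

	for (int i = 0; i < n; i++)
		hand[i] = hand[i] < scale ? 0 : hand[i] - scale;
}

/* Devices with more free space get smaller increments and are therefore
 * picked more often; the add saturates instead of wrapping. */
static void stripe_increment(uint64_t *hand, int n, int dev, uint64_t free_space)
{
	uint64_t inv = free_space ? INV_MAX / free_space : INV_MAX;
	uint64_t sum = hand[dev] + inv;

	hand[dev] = sum >= hand[dev] ? sum : UINT64_MAX;
	if (hand[dev] > RESCALE_AT)
		rescale(hand, n);
}

int main(void)
{
	uint64_t hands[2] = { RESCALE_AT, 10 };

	stripe_increment(hands, 2, 0, 1);	/* pushes hand 0 past the trigger */
	printf("%llu %llu\n", (unsigned long long)hands[0], (unsigned long long)hands[1]);
	return 0;
}
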
@@ -1633,7 +1669,7 @@ void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
- struct bch_dev_usage stats = bch2_dev_usage_read(ca);
+ struct bch_dev_usage_full stats = bch2_dev_usage_full_read(ca);
unsigned nr[BCH_DATA_NR];
memset(nr, 0, sizeof(nr));
@@ -1656,7 +1692,8 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
printbuf_tabstop_push(out, 16);
prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
- prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats));
+ prt_printf(out, "buckets to invalidate\t%llu\r\n",
+ should_invalidate_buckets(ca, bch2_dev_usage_read(ca)));
}
static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 21d1d86d5008..ff26bb515150 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -252,12 +252,24 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
0,
bp.v->level,
iter_flags);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
if (bkey_err(k)) {
bch2_trans_iter_exit(trans, iter);
return k;
}
+ /*
+ * peek_slot() doesn't normally return NULL - except when we ask for a
+ * key at a btree level that doesn't exist.
+ *
+ * We may want to revisit this and change peek_slot():
+ */
+ if (!k.k) {
+ bkey_init(&iter->k);
+ iter->k.p = bp.v->pos;
+ k.k = &iter->k;
+ }
+
if (k.k &&
extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp))
return k;
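
The added fallback handles peek_slot() returning a NULL key when the backpointer names a btree level that no longer exists: rather than passing NULL up, the function hands back an initialized, empty key at the backpointer's position. A small sketch of that never-return-NULL convention with stand-in types:

#include <stdio.h>
#include <stddef.h>

struct bkey { unsigned long pos; unsigned int type; };	/* type 0 == deleted/empty */
struct lookup { const struct bkey *k; struct bkey local; };

/* If the lookup came back empty, synthesize an empty key at the requested
 * position instead of returning a NULL pointer to the caller. */
static const struct bkey *key_or_empty(struct lookup *l, unsigned long pos)
{
	if (!l->k) {
		l->local = (struct bkey){ .pos = pos, .type = 0 };
		l->k = &l->local;
	}
	return l->k;
}

int main(void)
{
	struct lookup l = { .k = NULL };
	const struct bkey *k = key_or_empty(&l, 42);

	printf("pos=%lu empty=%d\n", k->pos, k->type == 0);
	return 0;
}
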
@@ -293,7 +305,7 @@ struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
0,
bp.v->level - 1,
0);
- struct btree *b = bch2_btree_iter_peek_node(iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, iter);
if (IS_ERR_OR_NULL(b))
goto err;
@@ -321,7 +333,7 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st
return 0;
struct bch_fs *c = trans->c;
- struct btree_iter alloc_iter = { NULL };
+ struct btree_iter alloc_iter = {};
struct bkey_s_c alloc_k;
struct printbuf buf = PRINTBUF;
int ret = 0;
@@ -462,7 +474,7 @@ err:
if (bio)
bio_put(bio);
kvfree(data_buf);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
printbuf_exit(&buf);
return ret;
}
@@ -650,7 +662,7 @@ static int check_btree_root_to_backpointers(struct btree_trans *trans,
retry:
bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
0, bch2_btree_id_root(c, btree_id)->b->c.level, 0);
- b = bch2_btree_iter_peek_node(&iter);
+ b = bch2_btree_iter_peek_node(trans, &iter);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
@@ -934,7 +946,7 @@ static int btree_node_get_and_pin(struct btree_trans *trans, struct bkey_i *k,
{
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, k->k.p, 0, level, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index f52311017aee..5d9f208a1bb7 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -524,8 +524,8 @@ struct bch_dev {
struct percpu_ref ref;
#endif
struct completion ref_completion;
- struct percpu_ref io_ref;
- struct completion io_ref_completion;
+ struct percpu_ref io_ref[2];
+ struct completion io_ref_completion[2];
struct bch_fs *fs;
@@ -562,7 +562,8 @@ struct bch_dev {
unsigned long *bucket_backpointer_mismatches;
unsigned long *bucket_backpointer_empty;
- struct bch_dev_usage __percpu *usage;
+ struct bch_dev_usage_full __percpu
+ *usage;
/* Allocator: */
u64 alloc_cursor[3];
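
The single io_ref/io_ref_completion pair becomes a two-element array, giving each device separate READ and WRITE I/O references: the read-side paths in this series (btree reads, scrub, backpointer fsck) drop io_ref[READ], while the discard and invalidate workers drop io_ref[WRITE]. Below is a toy refcount pair just to show the indexing convention; treating READ as 0 and WRITE as 1 is an assumption made for the example.

#include <stdio.h>

enum { RD = 0, WR = 1 };	/* stand-ins for the READ/WRITE indices */

struct dev { int io_ref[2]; };

static void get_ref(struct dev *d, int rw) { d->io_ref[rw]++; }
static void put_ref(struct dev *d, int rw) { d->io_ref[rw]--; }

int main(void)
{
	struct dev d = { { 0, 0 } };

	get_ref(&d, RD);	/* e.g. a btree node read holds the READ ref */
	get_ref(&d, WR);	/* a discard worker holds the WRITE ref */
	put_ref(&d, RD);
	put_ref(&d, WR);
	printf("read=%d write=%d\n", d.io_ref[RD], d.io_ref[WR]);
	return 0;
}
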
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 2025d408979c..7b98ba2dec64 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -691,7 +691,7 @@ retry_root:
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN,
0, bch2_btree_id_root(c, btree)->b->c.level, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err_root;
@@ -1199,7 +1199,7 @@ int bch2_gc_gens(struct bch_fs *c)
BCH_TRANS_COMMIT_no_enospc, ({
ca = bch2_dev_iterate(c, ca, k.k->p.inode);
if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
bch2_alloc_write_oldest_gen(trans, ca, &iter, k);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 1d94a2bf706d..5fd4a58d2ad2 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1353,7 +1353,7 @@ start:
"btree read error %s for %s",
bch2_blk_status_to_str(bio->bi_status), buf.buf);
if (rb->have_ioref)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
rb->have_ioref = false;
bch2_mark_io_failure(&failed, &rb->pick, false);
@@ -1609,6 +1609,7 @@ static void btree_node_read_all_replicas_endio(struct bio *bio)
struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
bch2_latency_acct(ca, rb->start_time, READ);
+ percpu_ref_put(&ca->io_ref[READ]);
}
ra->err[rb->idx] = bio->bi_status;
@@ -1908,7 +1909,8 @@ static void btree_node_scrub_work(struct work_struct *work)
scrub->key.k->k.p, 0, scrub->level - 1, 0);
struct btree *b;
- int ret = lockrestart_do(trans, PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(&iter)));
+ int ret = lockrestart_do(trans,
+ PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(trans, &iter)));
if (ret)
goto err;
@@ -1927,7 +1929,7 @@ err:
printbuf_exit(&err);
bch2_bkey_buf_exit(&scrub->key, c);;
btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
- percpu_ref_put(&scrub->ca->io_ref);
+ percpu_ref_put(&scrub->ca->io_ref[READ]);
kfree(scrub);
bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
}
@@ -1996,7 +1998,7 @@ int bch2_btree_node_scrub(struct btree_trans *trans,
return 0;
err_free:
btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
err:
bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
return ret;
@@ -2144,6 +2146,7 @@ static void btree_node_write_endio(struct bio *bio)
if (ca && bio->bi_status) {
struct printbuf buf = PRINTBUF;
+ buf.atomic++;
prt_printf(&buf, "btree write error: %s\n ",
bch2_blk_status_to_str(bio->bi_status));
bch2_btree_pos_to_text(&buf, c, b);
@@ -2158,8 +2161,12 @@ static void btree_node_write_endio(struct bio *bio)
spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
}
+ /*
+ * XXX: we should be using io_ref[WRITE], but we aren't retrying failed
+ * btree writes yet (due to device removal/ro):
+ */
if (wbio->have_ioref)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
if (parent) {
bio_put(bio);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index a9c110b846b5..e34e9598ef25 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -244,10 +244,8 @@ void bch2_trans_verify_paths(struct btree_trans *trans)
bch2_btree_path_verify(trans, path);
}
-static void bch2_btree_iter_verify(struct btree_iter *iter)
+static void bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
-
BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
@@ -276,9 +274,9 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
bkey_gt(iter->pos, iter->k.p)));
}
-static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
+static int bch2_btree_iter_verify_ret(struct btree_trans *trans,
+ struct btree_iter *iter, struct bkey_s_c k)
{
- struct btree_trans *trans = iter->trans;
struct btree_iter copy;
struct bkey_s_c prev;
int ret = 0;
@@ -299,7 +297,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
BTREE_ITER_nopreserve|
BTREE_ITER_all_snapshots);
- prev = bch2_btree_iter_prev(&copy);
+ prev = bch2_btree_iter_prev(trans, &copy);
if (!prev.k)
goto out;
@@ -365,9 +363,11 @@ static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
struct btree_path *path) {}
-static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
+static inline void bch2_btree_iter_verify(struct btree_trans *trans,
+ struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
-static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
+static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k) { return 0; }
#endif
@@ -1855,10 +1855,8 @@ hole:
return (struct bkey_s_c) { u, NULL };
}
-void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
+void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
-
if (!iter->path || trans->restarted)
return;
@@ -1870,17 +1868,14 @@ void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
/* Btree iterators: */
int __must_check
-__bch2_btree_iter_traverse(struct btree_iter *iter)
+__bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
{
- return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+ return bch2_btree_path_traverse(trans, iter->path, iter->flags);
}
int __must_check
-bch2_btree_iter_traverse(struct btree_iter *iter)
+bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
- int ret;
-
bch2_trans_verify_not_unlocked_or_in_restart(trans);
iter->path = bch2_btree_path_set_pos(trans, iter->path,
@@ -1888,7 +1883,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
- ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+ int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
return ret;
@@ -1900,14 +1895,14 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
/* Iterate across nodes (leaf and interior nodes) */
-struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
+struct btree *bch2_btree_iter_peek_node(struct btree_trans *trans,
+ struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
struct btree *b = NULL;
int ret;
EBUG_ON(trans->paths[iter->path].cached);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
@@ -1929,7 +1924,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return b;
err:
@@ -1938,26 +1933,26 @@ err:
}
/* Only kept for -tools */
-struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *trans,
+ struct btree_iter *iter)
{
struct btree *b;
- while (b = bch2_btree_iter_peek_node(iter),
+ while (b = bch2_btree_iter_peek_node(trans, iter),
bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
- bch2_trans_begin(iter->trans);
+ bch2_trans_begin(trans);
return b;
}
-struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
+struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
struct btree *b = NULL;
int ret;
EBUG_ON(trans->paths[iter->path].cached);
bch2_trans_verify_not_unlocked_or_in_restart(trans);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
@@ -2024,7 +2019,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return b;
err:
@@ -2034,7 +2029,7 @@ err:
/* Iterate across keys (in leaf nodes only) */
-inline bool bch2_btree_iter_advance(struct btree_iter *iter)
+inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter *iter)
{
struct bpos pos = iter->k.p;
bool ret = !(iter->flags & BTREE_ITER_all_snapshots
@@ -2043,11 +2038,11 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_successor(iter, pos);
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
return ret;
}
-inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
+inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
bool ret = !(iter->flags & BTREE_ITER_all_snapshots
@@ -2056,7 +2051,7 @@ inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_predecessor(iter, pos);
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
return ret;
}
@@ -2183,9 +2178,9 @@ void btree_trans_peek_prev_journal(struct btree_trans *trans,
* bkey_s_c_null:
*/
static noinline
-struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
+struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos pos)
{
- struct btree_trans *trans = iter->trans;
struct bch_fs *c = trans->c;
struct bkey u;
struct bkey_s_c k;
@@ -2231,14 +2226,14 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
return k;
}
-static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
+static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos search_key)
{
- struct btree_trans *trans = iter->trans;
struct bkey_s_c k, k2;
int ret;
EBUG_ON(btree_iter_path(trans, iter)->cached);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
@@ -2248,7 +2243,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
/* ensure that iter->k is consistent with iter->pos: */
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
k = bkey_s_c_err(ret);
break;
}
@@ -2258,7 +2253,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
if (unlikely(!l->b)) {
/* No btree nodes at requested level: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
@@ -2269,10 +2264,10 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
k.k &&
- (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
+ (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
k = k2;
if (bkey_err(k)) {
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
break;
}
}
@@ -2305,27 +2300,28 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
search_key = bpos_successor(l->b->key.k.p);
} else {
/* End of btree: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
}
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return k;
}
/**
* bch2_btree_iter_peek_max() - returns first key greater than or equal to
* iterator's current position
+ * @trans: btree transaction object
* @iter: iterator to peek from
* @end: search limit: returns keys less than or equal to @end
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end)
+struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end)
{
- struct btree_trans *trans = iter->trans;
struct bpos search_key = btree_iter_search_key(iter);
struct bkey_s_c k;
struct bpos iter_pos = iter->pos;
@@ -2348,7 +2344,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos en
}
while (1) {
- k = __bch2_btree_iter_peek(iter, search_key);
+ k = __bch2_btree_iter_peek(trans, iter, search_key);
if (unlikely(!k.k))
goto end;
if (unlikely(bkey_err(k)))
@@ -2462,9 +2458,9 @@ out_no_locked:
if (!(iter->flags & BTREE_ITER_all_snapshots))
iter->pos.snapshot = iter->snapshot;
- ret = bch2_btree_iter_verify_ret(iter, k);
+ ret = bch2_btree_iter_verify_ret(trans, iter, k);
if (unlikely(ret)) {
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
k = bkey_s_c_err(ret);
}
@@ -2472,7 +2468,7 @@ out_no_locked:
return k;
end:
- bch2_btree_iter_set_pos(iter, end);
+ bch2_btree_iter_set_pos(trans, iter, end);
k = bkey_s_c_null;
goto out_no_locked;
}
@@ -2480,24 +2476,25 @@ end:
/**
* bch2_btree_iter_next() - returns first key greater than iterator's current
* position
+ * @trans: btree transaction object
* @iter: iterator to peek from
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_advance(iter))
+ if (!bch2_btree_iter_advance(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek(iter);
+ return bch2_btree_iter_peek(trans, iter);
}
-static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key)
+static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos search_key)
{
- struct btree_trans *trans = iter->trans;
struct bkey_s_c k, k2;
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
@@ -2507,7 +2504,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
/* ensure that iter->k is consistent with iter->pos: */
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
k = bkey_s_c_err(ret);
break;
}
@@ -2517,7 +2514,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
if (unlikely(!l->b)) {
/* No btree nodes at requested level: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
@@ -2533,10 +2530,10 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
k.k &&
- (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
+ (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
k = k2;
if (bkey_err(k2)) {
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
break;
}
}
@@ -2557,25 +2554,27 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
search_key = bpos_predecessor(path->l[0].b->data->min_key);
} else {
/* Start of btree: */
- bch2_btree_iter_set_pos(iter, POS_MIN);
+ bch2_btree_iter_set_pos(trans, iter, POS_MIN);
k = bkey_s_c_null;
break;
}
}
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return k;
}
/**
* bch2_btree_iter_peek_prev_min() - returns first key less than or equal to
* iterator's current position
+ * @trans: btree transaction object
* @iter: iterator to peek from
* @end: search limit: returns keys greater than or equal to @end
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end)
+struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end)
{
if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) &&
!bkey_eq(iter->pos, POS_MAX)) {
@@ -2587,7 +2586,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
* real visible extents - easiest to just use peek_slot() (which
* internally uses peek() for extents)
*/
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
if (bkey_err(k))
return k;
@@ -2597,7 +2596,6 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
return k;
}
- struct btree_trans *trans = iter->trans;
struct bpos search_key = iter->pos;
struct bkey_s_c k;
btree_path_idx_t saved_path = 0;
@@ -2613,7 +2611,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
}
while (1) {
- k = __bch2_btree_iter_peek_prev(iter, search_key);
+ k = __bch2_btree_iter_peek_prev(trans, iter, search_key);
if (unlikely(!k.k))
goto end;
if (unlikely(bkey_err(k)))
@@ -2704,10 +2702,10 @@ out_no_locked:
bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return k;
end:
- bch2_btree_iter_set_pos(iter, end);
+ bch2_btree_iter_set_pos(trans, iter, end);
k = bkey_s_c_null;
goto out_no_locked;
}
@@ -2715,27 +2713,27 @@ end:
/**
* bch2_btree_iter_prev() - returns first key less than iterator's current
* position
+ * @trans: btree transaction object
* @iter: iterator to peek from
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_rewind(iter))
+ if (!bch2_btree_iter_rewind(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_prev(iter);
+ return bch2_btree_iter_peek_prev(trans, iter);
}
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
struct bpos search_key;
struct bkey_s_c k;
int ret;
bch2_trans_verify_not_unlocked_or_in_restart(trans);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
bch2_btree_iter_verify_entry_exit(iter);
EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
@@ -2751,7 +2749,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
if (iter->pos.inode == KEY_INODE_MAX)
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+ bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos));
}
search_key = btree_iter_search_key(iter);
@@ -2785,7 +2783,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
goto out;
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
- (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
+ (k = btree_trans_peek_key_cache(trans, iter, iter->pos)).k) {
if (!bkey_err(k))
iter->k = *k.k;
/* We're not returning a key from iter->path: */
@@ -2812,8 +2810,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
if (iter->flags & BTREE_ITER_intent) {
struct btree_iter iter2;
- bch2_trans_copy_iter(&iter2, iter);
- k = bch2_btree_iter_peek_max(&iter2, end);
+ bch2_trans_copy_iter(trans, &iter2, iter);
+ k = bch2_btree_iter_peek_max(trans, &iter2, end);
if (k.k && !bkey_err(k)) {
swap(iter->key_cache_path, iter2.key_cache_path);
@@ -2824,9 +2822,9 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
} else {
struct bpos pos = iter->pos;
- k = bch2_btree_iter_peek_max(iter, end);
+ k = bch2_btree_iter_peek_max(trans, iter, end);
if (unlikely(bkey_err(k)))
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
else
iter->pos = pos;
}
@@ -2857,39 +2855,39 @@ out:
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
- ret = bch2_btree_iter_verify_ret(iter, k);
+ bch2_btree_iter_verify(trans, iter);
+ ret = bch2_btree_iter_verify_ret(trans, iter, k);
if (unlikely(ret))
return bkey_s_c_err(ret);
return k;
}
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_advance(iter))
+ if (!bch2_btree_iter_advance(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(iter);
+ return bch2_btree_iter_peek_slot(trans, iter);
}
-struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_rewind(iter))
+ if (!bch2_btree_iter_rewind(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(iter);
+ return bch2_btree_iter_peek_slot(trans, iter);
}
/* Obsolete, but still used by rust wrapper in -tools */
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *trans, struct btree_iter *iter)
{
struct bkey_s_c k;
- while (btree_trans_too_many_iters(iter->trans) ||
- (k = bch2_btree_iter_peek_type(iter, iter->flags),
+ while (btree_trans_too_many_iters(trans) ||
+ (k = bch2_btree_iter_peek_type(trans, iter, iter->flags),
bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
- bch2_trans_begin(iter->trans);
+ bch2_trans_begin(trans);
return k;
}
@@ -3035,7 +3033,6 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
iter->path = 0;
iter->update_path = 0;
iter->key_cache_path = 0;
- iter->trans = NULL;
}
void bch2_trans_iter_init_outlined(struct btree_trans *trans,
@@ -3075,10 +3072,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
BUG_ON(iter->min_depth != depth);
}
-void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
+void bch2_trans_copy_iter(struct btree_trans *trans,
+ struct btree_iter *dst, struct btree_iter *src)
{
- struct btree_trans *trans = src->trans;
-
*dst = *src;
#ifdef TRACK_PATH_ALLOCATED
dst->ip_allocated = _RET_IP_;
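
Most of the btree_iter churn is one mechanical change: struct btree_iter no longer keeps a back-pointer to its transaction, so traverse/peek/advance/set_pos/copy and friends all take the struct btree_trans as an explicit first argument, and the header hunk that follows updates the prototypes to match. A signatures-only sketch of the new calling convention; the bodies are stubs, not the real bcachefs code.

#include <stddef.h>

struct btree_trans;
struct bpos { unsigned long inode, offset; };
struct btree_iter { struct bpos pos; };	/* no 'trans' back-pointer any more */
struct bkey_s_c { const void *k; };

static struct bkey_s_c iter_peek(struct btree_trans *trans, struct btree_iter *iter)
{
	(void)trans; (void)iter;
	return (struct bkey_s_c){ NULL };
}

static void iter_set_pos(struct btree_trans *trans, struct btree_iter *iter, struct bpos pos)
{
	(void)trans;
	iter->pos = pos;
}

/* Callers that used to write bch2_btree_iter_peek(&iter) and rely on
 * iter->trans now thread the transaction through explicitly. */
static void walk(struct btree_trans *trans, struct btree_iter *iter)
{
	struct bkey_s_c k = iter_peek(trans, iter);

	if (!k.k)
		iter_set_pos(trans, iter, (struct bpos){ 0, 0 });
}

int main(void)
{
	struct btree_iter iter = { { 0, 0 } };

	walk(NULL, &iter);
	return 0;
}
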
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index e6f51a3b8187..9d2cccf5d21a 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -393,36 +393,37 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct
void bch2_trans_node_drop(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);
-int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
-int __must_check bch2_btree_iter_traverse(struct btree_iter *);
+int __must_check __bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);
+int __must_check bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);
-struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
-struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
-struct btree *bch2_btree_iter_next_node(struct btree_iter *);
+struct btree *bch2_btree_iter_peek_node(struct btree_trans *, struct btree_iter *);
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *, struct btree_iter *);
+struct btree *bch2_btree_iter_next_node(struct btree_trans *, struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *, struct bpos);
-struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *, struct btree_iter *, struct bpos);
+struct bkey_s_c bch2_btree_iter_next(struct btree_trans *, struct btree_iter *);
-static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_trans *trans,
+ struct btree_iter *iter)
{
- return bch2_btree_iter_peek_max(iter, SPOS_MAX);
+ return bch2_btree_iter_peek_max(trans, iter, SPOS_MAX);
}
-struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *, struct bpos);
+struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *, struct btree_iter *, struct bpos);
-static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
+static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter)
{
- return bch2_btree_iter_peek_prev_min(iter, POS_MIN);
+ return bch2_btree_iter_peek_prev_min(trans, iter, POS_MIN);
}
-struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *, struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *, struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *, struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *, struct btree_iter *);
-bool bch2_btree_iter_advance(struct btree_iter *);
-bool bch2_btree_iter_rewind(struct btree_iter *);
+bool bch2_btree_iter_advance(struct btree_trans *, struct btree_iter *);
+bool bch2_btree_iter_rewind(struct btree_trans *, struct btree_iter *);
static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
@@ -433,10 +434,9 @@ static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpo
iter->k.size = 0;
}
-static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+static inline void bch2_btree_iter_set_pos(struct btree_trans *trans,
+ struct btree_iter *iter, struct bpos new_pos)
{
- struct btree_trans *trans = iter->trans;
-
if (unlikely(iter->update_path))
bch2_path_put(trans, iter->update_path,
iter->flags & BTREE_ITER_intent);
@@ -454,13 +454,14 @@ static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *it
iter->pos = bkey_start_pos(&iter->k);
}
-static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
+static inline void bch2_btree_iter_set_snapshot(struct btree_trans *trans,
+ struct btree_iter *iter, u32 snapshot)
{
struct bpos pos = iter->pos;
iter->snapshot = snapshot;
pos.snapshot = snapshot;
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
}
void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
@@ -502,7 +503,6 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
unsigned flags,
unsigned long ip)
{
- iter->trans = trans;
iter->update_path = 0;
iter->key_cache_path = 0;
iter->btree_id = btree_id;
@@ -539,9 +539,9 @@ static inline void bch2_trans_iter_init(struct btree_trans *trans,
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
enum btree_id, struct bpos,
unsigned, unsigned, unsigned);
-void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
+void bch2_trans_copy_iter(struct btree_trans *, struct btree_iter *, struct btree_iter *);
-void bch2_set_btree_iter_dontneed(struct btree_iter *);
+void bch2_set_btree_iter_dontneed(struct btree_trans *, struct btree_iter *);
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
@@ -588,7 +588,7 @@ static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
struct bkey_s_c k;
bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
- k = bch2_btree_iter_peek_slot(iter);
+ k = bch2_btree_iter_peek_slot(trans, iter);
if (!bkey_err(k) && type && k.k->type != type)
k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
@@ -658,14 +658,14 @@ u32 bch2_trans_begin(struct btree_trans *);
int _ret3 = 0; \
do { \
_ret3 = lockrestart_do((_trans), ({ \
- struct btree *_b = bch2_btree_iter_peek_node(&_iter); \
+ struct btree *_b = bch2_btree_iter_peek_node(_trans, &_iter);\
if (!_b) \
break; \
\
PTR_ERR_OR_ZERO(_b) ?: (_do); \
})) ?: \
lockrestart_do((_trans), \
- PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter))); \
+ PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(_trans, &_iter)));\
} while (!_ret3); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
@@ -677,31 +677,34 @@ u32 bch2_trans_begin(struct btree_trans *);
__for_each_btree_node(_trans, _iter, _btree_id, _start, \
0, 0, _flags, _b, _do)
-static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
+static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
- bch2_btree_iter_peek_prev(iter);
+ return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(trans, iter) :
+ bch2_btree_iter_peek_prev(trans, iter);
}
-static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
+static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
- bch2_btree_iter_peek(iter);
+ return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(trans, iter) :
+ bch2_btree_iter_peek(trans, iter);
}
-static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_iter *iter,
- struct bpos end,
- unsigned flags)
+static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end,
+ unsigned flags)
{
if (!(flags & BTREE_ITER_slots))
- return bch2_btree_iter_peek_max(iter, end);
+ return bch2_btree_iter_peek_max(trans, iter, end);
if (bkey_gt(iter->pos, end))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(iter);
+ return bch2_btree_iter_peek_slot(trans, iter);
}
int __bch2_btree_trans_too_many_iters(struct btree_trans *);
@@ -768,14 +771,14 @@ transaction_restart: \
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_max_type(&(_iter), \
+ (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), \
_end, (_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter))); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
@@ -813,14 +816,14 @@ transaction_restart: \
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_prev_type(&(_iter), \
+ (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), \
(_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_rewind(&(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_rewind(_trans, &(_iter))); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
@@ -850,37 +853,38 @@ transaction_restart: \
(_do) ?: bch2_trans_commit(_trans, (_disk_res),\
(_journal_seq), (_commit_flags)))
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *,
+ struct btree_iter *);
#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \
- (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags),\
+ (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags),\
!((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(&(_iter)))
+ bch2_btree_iter_advance(_trans, &(_iter)))
-#define for_each_btree_key_max_continue_norestart(_iter, _end, _flags, _k, _ret)\
+#define for_each_btree_key_max_continue_norestart(_trans, _iter, _end, _flags, _k, _ret)\
for (; \
- (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \
+ (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags), \
!((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(&(_iter)))
+ bch2_btree_iter_advance(_trans, &(_iter)))
#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\
SPOS_MAX, _flags, _k, _ret)
-#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \
- _start, _flags, _k, _ret) \
- for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags), \
- !((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_rewind(&(_iter)))
+#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _ret) \
+ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), _flags), \
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_rewind(_trans, &(_iter)))
-#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \
- for_each_btree_key_max_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
+#define for_each_btree_key_continue_norestart(_trans, _iter, _flags, _k, _ret) \
+ for_each_btree_key_max_continue_norestart(_trans, _iter, SPOS_MAX, _flags, _k, _ret)
/*
* This should not be used in a fastpath, without first trying _do in
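
The *_norestart iteration macros gain the same leading _trans argument as the functions they wrap. A hedged usage sketch (btree id and start position are placeholders):

	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_extents, start, 0, k, ret) {
		/* inspect k; the loop body must not restart the transaction */
	}
	bch2_trans_iter_exit(trans, &iter);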
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index edce59433375..2b186584a291 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -287,6 +287,19 @@ err:
return ret;
}
+static noinline_for_stack void do_trace_key_cache_fill(struct btree_trans *trans,
+ struct btree_path *ck_path,
+ struct bkey_s_c k)
+{
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bpos_to_text(&buf, ck_path->pos);
+ prt_char(&buf, ' ');
+ bch2_bkey_val_to_text(&buf, trans->c, k);
+ trace_key_cache_fill(trans, buf.buf);
+ printbuf_exit(&buf);
+}
+
static noinline int btree_key_cache_fill(struct btree_trans *trans,
struct btree_path *ck_path,
unsigned flags)
@@ -306,7 +319,7 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
BTREE_ITER_key_cache_fill|
BTREE_ITER_cached_nofill);
iter.flags &= ~BTREE_ITER_with_journal;
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -320,18 +333,11 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
if (ret)
goto err;
- if (trace_key_cache_fill_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bpos_to_text(&buf, ck_path->pos);
- prt_char(&buf, ' ');
- bch2_bkey_val_to_text(&buf, trans->c, k);
- trace_key_cache_fill(trans, buf.buf);
- printbuf_exit(&buf);
- }
+ if (trace_key_cache_fill_enabled())
+ do_trace_key_cache_fill(trans, ck_path, k);
out:
/* We're not likely to need this iterator again: */
- bch2_set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(trans, &iter);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -412,7 +418,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
BTREE_ITER_intent);
b_iter.flags &= ~BTREE_ITER_with_key_cache;
- ret = bch2_btree_iter_traverse(&c_iter);
+ ret = bch2_btree_iter_traverse(trans, &c_iter);
if (ret)
goto out;
@@ -444,7 +450,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
!test_bit(JOURNAL_space_low, &c->journal.flags))
commit_flags |= BCH_TRANS_COMMIT_no_journal_res;
- struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(&b_iter);
+ struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(trans, &b_iter);
ret = bkey_err(btree_k);
if (ret)
goto err;
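
Pulling the printbuf formatting into a noinline_for_stack helper keeps that code path's stack usage out of btree_key_cache_fill() itself; the helper only runs when the tracepoint is enabled. The call-site shape, as a sketch:

	if (trace_key_cache_fill_enabled())		/* cheap static-key check when tracing is off */
		do_trace_key_cache_fill(trans, ck_path, k);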
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 25d54b77cdc2..8c9fdb7263fe 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -271,7 +271,7 @@ static int read_btree_nodes_worker(void *p)
err:
bio_put(bio);
free_page((unsigned long) buf);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
closure_put(w->cl);
kfree(w);
return 0;
@@ -291,7 +291,7 @@ static int read_btree_nodes(struct find_btree_nodes *f)
struct find_btree_nodes_worker *w = kmalloc(sizeof(*w), GFP_KERNEL);
if (!w) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
ret = -ENOMEM;
goto err;
}
@@ -303,14 +303,14 @@ static int read_btree_nodes(struct find_btree_nodes *f)
struct task_struct *t = kthread_create(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
ret = PTR_ERR_OR_ZERO(t);
if (ret) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
kfree(w);
bch_err_msg(c, ret, "starting kthread");
break;
}
closure_get(&cl);
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
wake_up_process(t);
}
err:
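
ca->io_ref becomes a two-element array indexed by READ/WRITE, so each user takes and drops the reference for the direction it is actually doing IO in. A minimal read-side sketch (error handling elided):

	if (percpu_ref_tryget(&ca->io_ref[READ])) {
		/* ... submit read IO against ca ... */
		percpu_ref_put(&ca->io_ref[READ]);
	}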
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 77578da2d23f..023c472dc9ee 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -367,7 +367,6 @@ static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
* @nodes_intent_locked - bitmask indicating which locks are intent locks
*/
struct btree_iter {
- struct btree_trans *trans;
btree_path_idx_t path;
btree_path_idx_t update_path;
btree_path_idx_t key_cache_path;
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index c05394f56424..1e6b7836cc01 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -126,7 +126,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
struct bpos new_pos)
{
struct bch_fs *c = trans->c;
- struct btree_iter old_iter, new_iter = { NULL };
+ struct btree_iter old_iter, new_iter = {};
struct bkey_s_c old_k, new_k;
snapshot_id_list s;
struct bkey_i *update;
@@ -140,7 +140,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
bch2_trans_iter_init(trans, &old_iter, id, old_pos,
BTREE_ITER_not_extents|
BTREE_ITER_all_snapshots);
- while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
+ while ((old_k = bch2_btree_iter_prev(trans, &old_iter)).k &&
!(ret = bkey_err(old_k)) &&
bkey_eq(old_pos, old_k.k->p)) {
struct bpos whiteout_pos =
@@ -296,7 +296,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
BTREE_ITER_intent|
BTREE_ITER_with_updates|
BTREE_ITER_not_extents);
- k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
+ k = bch2_btree_iter_peek_max(trans, &iter, POS(insert->k.p.inode, U64_MAX));
if ((ret = bkey_err(k)))
goto err;
if (!k.k)
@@ -322,8 +322,8 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
if (done)
goto out;
next:
- bch2_btree_iter_advance(&iter);
- k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
+ bch2_btree_iter_advance(trans, &iter);
+ k = bch2_btree_iter_peek_max(trans, &iter, POS(insert->k.p.inode, U64_MAX));
if ((ret = bkey_err(k)))
goto err;
if (!k.k)
@@ -592,13 +592,13 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
enum btree_id btree, struct bpos end)
{
bch2_trans_iter_init(trans, iter, btree, end, BTREE_ITER_intent);
- struct bkey_s_c k = bch2_btree_iter_peek_prev(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_prev(trans, iter);
int ret = bkey_err(k);
if (ret)
goto err;
- bch2_btree_iter_advance(iter);
- k = bch2_btree_iter_peek_slot(iter);
+ bch2_btree_iter_advance(trans, iter);
+ k = bch2_btree_iter_peek_slot(trans, iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -634,7 +634,7 @@ int bch2_btree_insert_nonextent(struct btree_trans *trans,
BTREE_ITER_cached|
BTREE_ITER_not_extents|
BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, k, flags);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -646,7 +646,7 @@ int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
struct btree_iter iter;
bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
BTREE_ITER_intent|flags);
- int ret = bch2_btree_iter_traverse(&iter) ?:
+ int ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, k, flags);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -695,7 +695,7 @@ int bch2_btree_delete(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter, btree, pos,
BTREE_ITER_cached|
BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, update_flags);
bch2_trans_iter_exit(trans, &iter);
@@ -713,7 +713,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
int ret = 0;
bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
- while ((k = bch2_btree_iter_peek_max(&iter, end)).k) {
+ while ((k = bch2_btree_iter_peek_max(trans, &iter, end)).k) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(trans->c, 0);
struct bkey_i delete;
@@ -808,7 +808,7 @@ int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
struct btree_iter iter;
bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(&iter) ?:
+ int ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_bit_mod_iter(trans, &iter, set);
bch2_trans_iter_exit(trans, &iter);
return ret;
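
With the trans pointer removed, struct btree_iter no longer starts with a pointer member, so the `= { NULL }` initializers scattered through these files are switched to the plain empty initializer; both zero the structure, the new form just stops spelling out a member that no longer exists. Sketch:

	struct btree_iter iter = {};	/* zero-initialized; was "= { NULL }" when the first member was a pointer */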
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index bf7e1dac7f46..55fbeeb8eaaa 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -2147,7 +2147,7 @@ static int get_iter_to_node(struct btree_trans *trans, struct btree_iter *iter,
bch2_trans_node_iter_init(trans, iter, b->c.btree_id, b->key.k.p,
BTREE_MAX_DEPTH, b->c.level,
BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(iter);
+ int ret = bch2_btree_iter_traverse(trans, iter);
if (ret)
goto err;
@@ -2239,7 +2239,7 @@ static int bch2_btree_node_rewrite_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter,
btree, k->k.p,
BTREE_MAX_DEPTH, level, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto out;
@@ -2262,7 +2262,7 @@ int bch2_btree_node_rewrite_pos(struct btree_trans *trans,
/* Traverse one depth lower to get a pointer to the node itself: */
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, pos, 0, level - 1, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
@@ -2406,7 +2406,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
bool skip_triggers)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter2 = { NULL };
+ struct btree_iter iter2 = {};
struct btree *parent;
int ret;
@@ -2430,7 +2430,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
parent = btree_node_parent(btree_iter_path(trans, iter), b);
if (parent) {
- bch2_trans_copy_iter(&iter2, iter);
+ bch2_trans_copy_iter(trans, &iter2, iter);
iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
iter2.flags & BTREE_ITER_intent,
@@ -2444,7 +2444,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
trans->paths_sorted = false;
- ret = bch2_btree_iter_traverse(&iter2) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter2) ?:
bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun);
if (ret)
goto err;
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 2c09d19dd621..adbe576ec77e 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -144,7 +144,7 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite
EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
- ret = bch2_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(trans, iter);
if (ret)
return ret;
@@ -208,7 +208,7 @@ btree_write_buffered_insert(struct btree_trans *trans,
trans->journal_res.seq = wb->journal_seq;
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, &wb->k,
BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter);
@@ -285,7 +285,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
struct btree_write_buffer *wb = &c->btree_write_buffer;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
size_t overwritten = 0, fast = 0, slowpath = 0, could_not_insert = 0;
bool write_locked = false;
bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags);
@@ -368,7 +368,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
write_locked = false;
ret = lockrestart_do(trans,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_foreground_maybe_merge(trans, iter.path, 0,
BCH_WATERMARK_reclaim|
BCH_TRANS_COMMIT_journal_reclaim|
@@ -385,7 +385,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
BTREE_ITER_intent|BTREE_ITER_all_snapshots);
}
- bch2_btree_iter_set_pos(&iter, k->k.k.p);
+ bch2_btree_iter_set_pos(trans, &iter, k->k.k.p);
btree_iter_path(trans, &iter)->preserve = false;
bool accounting_accumulated = false;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 0903311cc71e..fea61e60a9ee 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -30,6 +30,12 @@
void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
+ for (unsigned i = 0; i < BCH_DATA_NR; i++)
+ usage->buckets[i] = percpu_u64_get(&ca->usage->d[i].buckets);
+}
+
+void bch2_dev_usage_full_read_fast(struct bch_dev *ca, struct bch_dev_usage_full *usage)
+{
memset(usage, 0, sizeof(*usage));
acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
}
@@ -75,7 +81,7 @@ bch2_fs_usage_read_short(struct bch_fs *c)
void bch2_dev_usage_to_text(struct printbuf *out,
struct bch_dev *ca,
- struct bch_dev_usage *usage)
+ struct bch_dev_usage_full *usage)
{
if (out->nr_tabstops < 5) {
printbuf_tabstops_reset(out);
@@ -365,7 +371,7 @@ found:
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
BTREE_ITER_intent|BTREE_ITER_all_snapshots);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, new,
BTREE_UPDATE_internal_snapshot_node|
BTREE_TRIGGER_norun);
@@ -707,7 +713,7 @@ err:
struct disk_accounting_pos acc;
memset(&acc, 0, sizeof(acc));
acc.type = BCH_DISK_ACCOUNTING_replicas;
- memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e));
+ unsafe_memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e), "VLA");
gc_stripe_unlock(m);
acc.replicas.data_type = data_type;
@@ -1132,7 +1138,7 @@ int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
for_each_online_member(c, ca) {
int ret = bch2_trans_mark_dev_sb(c, ca, flags);
if (ret) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return ret;
}
}
@@ -1331,7 +1337,7 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
- ca->usage = alloc_percpu(struct bch_dev_usage);
+ ca->usage = alloc_percpu(struct bch_dev_usage_full);
if (!ca->usage)
return -BCH_ERR_ENOMEM_usage_init;
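
The "VLA" justification string notes that the copy length, replicas_entry_bytes(), is computed at runtime over a structure ending in a flexible array, which the fortified memcpy() would otherwise flag; unsafe_memcpy() performs the same copy with the reason recorded in the source. General shape of the call (arguments here are illustrative):

	unsafe_memcpy(dst, src, runtime_len,
		      "copy covers a flexible-array member, length computed at runtime");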
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index c5363256e363..1c38b165f48b 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -172,7 +172,16 @@ static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
return ret;
}
-void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);
+void bch2_dev_usage_full_read_fast(struct bch_dev *, struct bch_dev_usage_full *);
+static inline struct bch_dev_usage_full bch2_dev_usage_full_read(struct bch_dev *ca)
+{
+ struct bch_dev_usage_full ret;
+
+ bch2_dev_usage_full_read_fast(ca, &ret);
+ return ret;
+}
+
+void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage_full *);
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
@@ -207,7 +216,7 @@ static inline u64 dev_buckets_free(struct bch_dev *ca,
enum bch_watermark watermark)
{
return max_t(s64, 0,
- usage.d[BCH_DATA_free].buckets -
+ usage.buckets[BCH_DATA_free]-
ca->nr_open_buckets -
bch2_dev_buckets_reserved(ca, watermark));
}
@@ -217,10 +226,10 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca,
enum bch_watermark watermark)
{
return max_t(s64, 0,
- usage.d[BCH_DATA_free].buckets
- + usage.d[BCH_DATA_cached].buckets
- + usage.d[BCH_DATA_need_gc_gens].buckets
- + usage.d[BCH_DATA_need_discard].buckets
+ usage.buckets[BCH_DATA_free]
+ + usage.buckets[BCH_DATA_cached]
+ + usage.buckets[BCH_DATA_need_gc_gens]
+ + usage.buckets[BCH_DATA_need_discard]
- ca->nr_open_buckets
- bch2_dev_buckets_reserved(ca, watermark));
}
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 900b8680c8b5..0aed2500ade3 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -54,7 +54,12 @@ struct bucket_gens {
u8 b[] __counted_by(nbuckets);
};
+/* Only info on bucket counts: */
struct bch_dev_usage {
+ u64 buckets[BCH_DATA_NR];
+};
+
+struct bch_dev_usage_full {
struct bch_dev_usage_type {
u64 buckets;
u64 sectors; /* _compressed_ sectors: */
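
bch_dev_usage shrinks to just per-data-type bucket counts, while the full per-type counters move to bch_dev_usage_full: hot allocator paths read the small structure, reporting paths read the full one. A sketch using the accessors introduced above:

	struct bch_dev_usage u = bch2_dev_usage_read(ca);		/* buckets[] only */
	struct bch_dev_usage_full f = bch2_dev_usage_full_read(ca);	/* full per-type stats */

	u64 free_buckets  = u.buckets[BCH_DATA_free];
	u64 btree_sectors = f.d[BCH_DATA_btree].sectors;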
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 584f4a3eb670..5891b3a1e61c 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -350,8 +350,8 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
if (ctx->arg.op == BCH_DATA_OP_scrub) {
struct bch_dev *ca = bch2_dev_tryget(c, ctx->arg.scrub.dev);
if (ca) {
- struct bch_dev_usage u;
- bch2_dev_usage_read_fast(ca, &u);
+ struct bch_dev_usage_full u;
+ bch2_dev_usage_full_read_fast(ca, &u);
for (unsigned i = BCH_DATA_btree; i < ARRAY_SIZE(u.d); i++)
if (ctx->arg.scrub.data_types & BIT(i))
e.p.sectors_total += u.d[i].sectors;
@@ -473,7 +473,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
struct bch_ioctl_dev_usage __user *user_arg)
{
struct bch_ioctl_dev_usage arg;
- struct bch_dev_usage src;
+ struct bch_dev_usage_full src;
struct bch_dev *ca;
unsigned i;
@@ -493,7 +493,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
if (IS_ERR(ca))
return PTR_ERR(ca);
- src = bch2_dev_usage_read(ca);
+ src = bch2_dev_usage_full_read(ca);
arg.state = ca->mi.state;
arg.bucket_size = ca->mi.bucket_size;
@@ -514,7 +514,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
struct bch_ioctl_dev_usage_v2 __user *user_arg)
{
struct bch_ioctl_dev_usage_v2 arg;
- struct bch_dev_usage src;
+ struct bch_dev_usage_full src;
struct bch_dev *ca;
int ret = 0;
@@ -534,7 +534,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
if (IS_ERR(ca))
return PTR_ERR(ca);
- src = bch2_dev_usage_read(ca);
+ src = bch2_dev_usage_full_read(ca);
arg.state = ca->mi.state;
arg.bucket_size = ca->mi.bucket_size;
@@ -615,7 +615,7 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
for_each_online_member(c, ca)
if (ca->dev == dev) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return ca->dev_idx;
}
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 85fc90342492..28ed32449913 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -371,13 +371,14 @@ static int attempt_compress(struct bch_fs *c,
};
zlib_set_workspace(&strm, workspace);
- zlib_deflateInit2(&strm,
+ if (zlib_deflateInit2(&strm,
compression.level
? clamp_t(unsigned, compression.level,
Z_BEST_SPEED, Z_BEST_COMPRESSION)
: Z_DEFAULT_COMPRESSION,
Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
- Z_DEFAULT_STRATEGY);
+ Z_DEFAULT_STRATEGY) != Z_OK)
+ return 0;
if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END)
return 0;
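
zlib_deflateInit2() can fail, and attempt_compress() already returns 0 when deflate itself fails, so an init failure now reports the same "no compressed output" result instead of being silently ignored. The bare shape of the check:

	if (zlib_deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			      -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY) != Z_OK)
		return 0;	/* same "didn't compress" result the caller already handles */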
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index fe400dfc5d76..de02ebf847ec 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -216,7 +216,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -398,7 +398,7 @@ restart_drop_extra_replicas:
BCH_TRANS_COMMIT_no_enospc|
m->data_opts.btree_insert_flags);
if (!ret) {
- bch2_btree_iter_set_pos(&iter, next_pos);
+ bch2_btree_iter_set_pos(trans, &iter, next_pos);
this_cpu_add(c->counters[BCH_COUNTER_io_move_finish], new->k.size);
if (trace_io_move_finish_enabled())
@@ -426,7 +426,7 @@ nowork:
count_event(c, io_move_fail);
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
goto next;
}
out:
@@ -497,7 +497,7 @@ static int bch2_update_unwritten_extent(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
BTREE_ITER_slots);
ret = lockrestart_do(trans, ({
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
bkey_err(k);
}));
bch2_trans_iter_exit(trans, &iter);
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 788af88f6979..5a8bc7013512 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -57,7 +57,7 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
submit_bio_wait(bio);
bio_put(bio);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
memcpy(n_ondisk, n_sorted, btree_buf_bytes(b));
@@ -297,7 +297,7 @@ out:
if (bio)
bio_put(bio);
kvfree(n_ondisk);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
}
#ifdef CONFIG_DEBUG_FS
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index d7f9f79318a2..bf53a029f356 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -417,8 +417,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
enum bch_rename_mode mode)
{
struct qstr src_name_lookup, dst_name_lookup;
- struct btree_iter src_iter = { NULL };
- struct btree_iter dst_iter = { NULL };
+ struct btree_iter src_iter = {};
+ struct btree_iter dst_iter = {};
struct bkey_s_c old_src, old_dst = bkey_s_c_null;
struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
struct bpos dst_pos =
@@ -586,16 +586,16 @@ out_set_src:
}
if (delete_src) {
- bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&src_iter) ?:
+ bch2_btree_iter_set_snapshot(trans, &src_iter, old_src.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(trans, &src_iter) ?:
bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
}
if (delete_dst) {
- bch2_btree_iter_set_snapshot(&dst_iter, old_dst.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&dst_iter) ?:
+ bch2_btree_iter_set_snapshot(trans, &dst_iter, old_dst.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(trans, &dst_iter) ?:
bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
@@ -642,7 +642,7 @@ u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
const struct qstr *name, subvol_inum *inum)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
int ret = lockrestart_do(trans,
bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0));
@@ -771,7 +771,7 @@ int bch2_fsck_remove_dirent(struct btree_trans *trans, struct bpos pos)
bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash_info, &iter,
BTREE_UPDATE_internal_snapshot_node);
diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c
index a59f6c12529b..b007319b72e9 100644
--- a/fs/bcachefs/disk_accounting.c
+++ b/fs/bcachefs/disk_accounting.c
@@ -739,7 +739,7 @@ int bch2_accounting_read(struct bch_fs *c)
struct disk_accounting_pos next;
memset(&next, 0, sizeof(next));
next.type = acc_k.type + 1;
- bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next));
+ bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next));
continue;
}
@@ -930,7 +930,7 @@ void bch2_verify_accounting_clean(struct bch_fs *c)
struct disk_accounting_pos next;
memset(&next, 0, sizeof(next));
next.type = acc_k.type + 1;
- bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next));
+ bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next));
continue;
}
diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c
index 5df8de0b8c02..1186280b29e9 100644
--- a/fs/bcachefs/disk_groups.c
+++ b/fs/bcachefs/disk_groups.c
@@ -555,9 +555,9 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
? rcu_dereference(c->devs[t.dev])
: NULL;
- if (ca && percpu_ref_tryget(&ca->io_ref)) {
+ if (ca && percpu_ref_tryget(&ca->io_ref[READ])) {
prt_printf(out, "/dev/%s", ca->name);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
} else if (ca) {
prt_printf(out, "offline device %u", t.dev);
} else {
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 6faeda7ad03d..a396865e8b17 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -105,6 +105,7 @@ struct ec_bio {
struct bch_dev *ca;
struct ec_stripe_buf *buf;
size_t idx;
+ int rw;
u64 submit_time;
struct bio bio;
};
@@ -462,7 +463,8 @@ int bch2_trigger_stripe(struct btree_trans *trans,
return ret;
if (gc)
- memcpy(&gc->r.e, &acc.replicas, replicas_entry_bytes(&acc.replicas));
+ unsafe_memcpy(&gc->r.e, &acc.replicas,
+ replicas_entry_bytes(&acc.replicas), "VLA");
}
if (old_s) {
@@ -703,6 +705,7 @@ static void ec_block_endio(struct bio *bio)
struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
struct bch_dev *ca = ec_bio->ca;
struct closure *cl = bio->bi_private;
+ int rw = ec_bio->rw;
bch2_account_io_completion(ca, bio_data_dir(bio),
ec_bio->submit_time, !bio->bi_status);
@@ -724,7 +727,7 @@ static void ec_block_endio(struct bio *bio)
}
bio_put(&ec_bio->bio);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
closure_put(cl);
}
@@ -775,6 +778,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
ec_bio->ca = ca;
ec_bio->buf = buf;
ec_bio->idx = idx;
+ ec_bio->rw = rw;
ec_bio->submit_time = local_clock();
ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
@@ -784,14 +788,14 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
closure_get(cl);
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[rw]);
submit_bio(&ec_bio->bio);
offset += b;
}
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
}
static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
@@ -1264,7 +1268,7 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
ob->sectors_free,
GFP_KERNEL, 0);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
if (ret)
s->err = ret;
@@ -1836,7 +1840,7 @@ static int __get_existing_stripe(struct btree_trans *trans,
ret = 1;
}
out:
- bch2_set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(trans, &iter);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -1949,7 +1953,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st
if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
if (start_pos.offset) {
start_pos = min_pos;
- bch2_btree_iter_set_pos(&iter, start_pos);
+ bch2_btree_iter_set_pos(trans, &iter, start_pos);
continue;
}
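
With io_ref split by direction, ec_block_endio() can no longer drop a single undifferentiated ref; the submit path records whether the stripe block IO was a read or a write so completion drops the matching ref. Simplified sketch of the two halves (function names are illustrative):

static void example_ec_submit(struct ec_bio *ec_bio, struct bch_dev *ca, int rw)
{
	ec_bio->rw = rw;			/* READ or WRITE, remembered for completion */
	percpu_ref_get(&ca->io_ref[rw]);
	submit_bio(&ec_bio->bio);
}

static void example_ec_complete(struct ec_bio *ec_bio, struct bch_dev *ca)
{
	percpu_ref_put(&ca->io_ref[ec_bio->rw]);	/* drop the ref taken at submit time */
}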
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index d4dfd13a8076..baf5dfb32298 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -34,7 +34,7 @@ bool __bch2_inconsistent_error(struct bch_fs *c, struct printbuf *out)
journal_cur_seq(&c->journal));
return true;
case BCH_ON_ERROR_panic:
- bch2_print_string_as_lines(KERN_ERR, out->buf);
+ bch2_print_string_as_lines_nonblocking(KERN_ERR, out->buf);
panic(bch2_fmt(c, "panic after error"));
return true;
default:
@@ -45,6 +45,8 @@ bool __bch2_inconsistent_error(struct bch_fs *c, struct printbuf *out)
bool bch2_inconsistent_error(struct bch_fs *c)
{
struct printbuf buf = PRINTBUF;
+ buf.atomic++;
+
printbuf_indent_add_nextline(&buf, 2);
bool ret = __bch2_inconsistent_error(c, &buf);
@@ -59,6 +61,7 @@ static bool bch2_fs_trans_inconsistent(struct bch_fs *c, struct btree_trans *tra
const char *fmt, va_list args)
{
struct printbuf buf = PRINTBUF;
+ buf.atomic++;
bch2_log_msg_start(c, &buf);
@@ -68,7 +71,7 @@ static bool bch2_fs_trans_inconsistent(struct bch_fs *c, struct btree_trans *tra
if (trans)
bch2_trans_updates_to_text(&buf, trans);
bool ret = __bch2_inconsistent_error(c, &buf);
- bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf);
printbuf_exit(&buf);
return ret;
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
index 6aac579a692a..6bb42985306e 100644
--- a/fs/bcachefs/extent_update.c
+++ b/fs/bcachefs/extent_update.c
@@ -112,7 +112,7 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
unsigned nr_iters = 0;
int ret;
- ret = bch2_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(trans, iter);
if (ret)
return ret;
@@ -126,9 +126,9 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
if (ret < 0)
return ret;
- bch2_trans_copy_iter(&copy, iter);
+ bch2_trans_copy_iter(trans, &copy, iter);
- for_each_btree_key_max_continue_norestart(copy, insert->k.p, 0, k, ret) {
+ for_each_btree_key_max_continue_norestart(trans, copy, insert->k.p, 0, k, ret) {
unsigned offset = 0;
if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index a03e2c780cba..19d4599918dc 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -183,12 +183,12 @@ static void bchfs_read(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- bch2_btree_iter_set_pos(&iter,
+ bch2_btree_iter_set_pos(trans, &iter,
POS(inum.inum, rbio->bio.bi_iter.bi_sector));
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index c80ed3a54e70..65c2c33d253d 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -48,7 +48,7 @@ static void nocow_flush_endio(struct bio *_bio)
struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
closure_put(bio->cl);
- percpu_ref_put(&bio->ca->io_ref);
+ percpu_ref_put(&bio->ca->io_ref[WRITE]);
bio_put(&bio->bio);
}
@@ -71,7 +71,7 @@ void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
rcu_read_lock();
ca = rcu_dereference(c->devs[dev]);
- if (ca && !percpu_ref_tryget(&ca->io_ref))
+ if (ca && !percpu_ref_tryget(&ca->io_ref[WRITE]))
ca = NULL;
rcu_read_unlock();
@@ -636,9 +636,9 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
if (ret)
goto bkey_err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
if ((ret = bkey_err(k)))
goto bkey_err;
@@ -649,13 +649,13 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
/* already reserved */
if (bkey_extent_is_reservation(k) &&
bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
continue;
}
if (bkey_extent_is_data(k.k) &&
!(mode & FALLOC_FL_ZERO_RANGE)) {
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
continue;
}
@@ -676,7 +676,7 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
if (ret)
goto bkey_err;
}
- bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));
+ bch2_btree_iter_set_pos(trans, &iter, POS(iter.pos.inode, hole_start));
if (ret)
goto bkey_err;
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index fc834bdf1f52..5a41b1a8e54f 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -88,7 +88,7 @@ int __must_check bch2_write_inode(struct bch_fs *c,
void *p, unsigned fields)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bch_inode_unpacked inode_u;
int ret;
retry:
@@ -1075,7 +1075,7 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap,
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_qid qid;
struct btree_trans *trans;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
struct bch_inode_unpacked inode_u;
struct posix_acl *acl = NULL;
kuid_t kuid;
@@ -1330,9 +1330,9 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- k = bch2_btree_iter_peek_max(&iter, end);
+ k = bch2_btree_iter_peek_max(trans, &iter, end);
ret = bkey_err(k);
if (ret)
continue;
@@ -1342,7 +1342,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
if (!bkey_extent_is_data(k.k) &&
k.k->type != KEY_TYPE_reservation) {
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
continue;
}
@@ -1380,7 +1380,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bkey_copy(prev.k, cur.k);
have_extent = true;
- bch2_btree_iter_set_pos(&iter,
+ bch2_btree_iter_set_pos(trans, &iter,
POS(iter.pos.inode, iter.pos.offset + sectors));
}
bch2_trans_iter_exit(trans, &iter);
@@ -1697,17 +1697,17 @@ retry:
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter1, snapshot);
- bch2_btree_iter_set_snapshot(&iter2, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter1, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter2, snapshot);
ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u);
if (ret)
goto err;
if (inode_u.bi_dir == dir->ei_inode.bi_inum) {
- bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));
+ bch2_btree_iter_set_pos(trans, &iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));
- k = bch2_btree_iter_peek_slot(&iter1);
+ k = bch2_btree_iter_peek_slot(trans, &iter1);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1731,7 +1731,7 @@ retry:
* File with multiple hardlinks and our backref is to the wrong
* directory - linear search:
*/
- for_each_btree_key_continue_norestart(iter2, 0, k, ret) {
+ for_each_btree_key_continue_norestart(trans, iter2, 0, k, ret) {
if (k.k->p.inode > dir->ei_inode.bi_inum)
break;
@@ -2237,7 +2237,7 @@ got_sb:
/* XXX: create an anonymous device for multi device filesystems */
sb->s_bdev = bdev;
sb->s_dev = bdev->bd_dev;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
break;
}
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 52320295dcf6..18308f3d64a1 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -186,7 +186,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
{
struct bch_fs *c = trans->c;
struct qstr lostfound_str = QSTR("lost+found");
- struct btree_iter lostfound_iter = { NULL };
+ struct btree_iter lostfound_iter = {};
u64 inum = 0;
unsigned d_type = 0;
int ret;
@@ -295,8 +295,8 @@ create_lostfound:
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&lostfound_iter, snapshot);
- ret = bch2_btree_iter_traverse(&lostfound_iter);
+ bch2_btree_iter_set_snapshot(trans, &lostfound_iter, snapshot);
+ ret = bch2_btree_iter_traverse(trans, &lostfound_iter);
if (ret)
goto err;
@@ -544,7 +544,7 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub
new_inode.bi_subvol = subvolid;
int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?:
- bch2_btree_iter_traverse(&inode_iter) ?:
+ bch2_btree_iter_traverse(trans, &inode_iter) ?:
bch2_inode_write(trans, &inode_iter, &new_inode);
bch2_trans_iter_exit(trans, &inode_iter);
if (ret)
@@ -609,7 +609,7 @@ static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32
struct btree_iter iter = {};
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
- struct bkey_s_c k = bch2_btree_iter_peek_prev_min(&iter, POS(inum, 0));
+ struct bkey_s_c k = bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum, 0));
bch2_trans_iter_exit(trans, &iter);
int ret = bkey_err(k);
if (ret)
@@ -1557,7 +1557,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
- struct btree_iter iter1, iter2 = { NULL };
+ struct btree_iter iter1, iter2 = {};
struct bkey_s_c k1, k2;
int ret;
@@ -1566,7 +1566,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter1, btree, pos1,
BTREE_ITER_all_snapshots|
BTREE_ITER_not_extents);
- k1 = bch2_btree_iter_peek_max(&iter1, POS(pos1.inode, U64_MAX));
+ k1 = bch2_btree_iter_peek_max(trans, &iter1, POS(pos1.inode, U64_MAX));
ret = bkey_err(k1);
if (ret)
goto err;
@@ -1586,12 +1586,12 @@ static int overlapping_extents_found(struct btree_trans *trans,
goto err;
}
- bch2_trans_copy_iter(&iter2, &iter1);
+ bch2_trans_copy_iter(trans, &iter2, &iter1);
while (1) {
- bch2_btree_iter_advance(&iter2);
+ bch2_btree_iter_advance(trans, &iter2);
- k2 = bch2_btree_iter_peek_max(&iter2, POS(pos1.inode, U64_MAX));
+ k2 = bch2_btree_iter_peek_max(trans, &iter2, POS(pos1.inode, U64_MAX));
ret = bkey_err(k2);
if (ret)
goto err;
@@ -1791,9 +1791,9 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
struct btree_iter iter2;
- bch2_trans_copy_iter(&iter2, iter);
- bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
- ret = bch2_btree_iter_traverse(&iter2) ?:
+ bch2_trans_copy_iter(trans, &iter2, iter);
+ bch2_btree_iter_set_snapshot(trans, &iter2, i->snapshot);
+ ret = bch2_btree_iter_traverse(trans, &iter2) ?:
bch2_btree_delete_at(trans, &iter2,
BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter2);
@@ -2185,7 +2185,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
BTREE_ID_dirents,
SPOS(k.k->p.inode, k.k->p.offset, *i),
BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&delete_iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &delete_iter) ?:
bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
hash_info,
&delete_iter,
@@ -2412,7 +2412,7 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter,
bch2_trans_iter_exit(trans, &parent_iter);
bch2_trans_iter_init(trans, &parent_iter,
BTREE_ID_subvolumes, POS(0, parent), 0);
- k = bch2_btree_iter_peek_slot(&parent_iter);
+ k = bch2_btree_iter_peek_slot(trans, &parent_iter);
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 80051073f613..b51d98cf8a80 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -940,7 +940,7 @@ int bch2_inode_create(struct btree_trans *trans,
BTREE_ITER_intent);
struct bkey_s_c k;
again:
- while ((k = bch2_btree_iter_peek(iter)).k &&
+ while ((k = bch2_btree_iter_peek(trans, iter)).k &&
!(ret = bkey_err(k)) &&
bkey_lt(k.k->p, POS(0, max))) {
if (pos < iter->pos.offset)
@@ -951,7 +951,7 @@ again:
* we've found just one:
*/
pos = iter->pos.offset + 1;
- bch2_btree_iter_set_pos(iter, POS(0, pos));
+ bch2_btree_iter_set_pos(trans, iter, POS(0, pos));
}
if (!ret && pos < max)
@@ -967,12 +967,12 @@ again:
/* Retry from start */
pos = start = min;
- bch2_btree_iter_set_pos(iter, POS(0, pos));
+ bch2_btree_iter_set_pos(trans, iter, POS(0, pos));
le32_add_cpu(&cursor->v.gen, 1);
goto again;
found_slot:
- bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot));
- k = bch2_btree_iter_peek_slot(iter);
+ bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, snapshot));
+ k = bch2_btree_iter_peek_slot(trans, iter);
ret = bkey_err(k);
if (ret) {
bch2_trans_iter_exit(trans, iter);
@@ -1009,9 +1009,9 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- k = bch2_btree_iter_peek_max(&iter, end);
+ k = bch2_btree_iter_peek_max(trans, &iter, end);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1042,7 +1042,7 @@ err:
int bch2_inode_rm(struct bch_fs *c, subvol_inum inum)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bkey_s_c k;
u32 snapshot;
int ret;
@@ -1207,7 +1207,7 @@ int bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_i
static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bkey_i_inode_generation delete;
struct bch_inode_unpacked inode_u;
struct bkey_s_c k;
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index 6b842c8d21be..cc07729a4b62 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -43,7 +43,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
bch2_bkey_buf_init(&new);
closure_init_stack(&cl);
- k = bch2_btree_iter_peek_slot(iter);
+ k = bch2_btree_iter_peek_slot(trans, iter);
ret = bkey_err(k);
if (ret)
return ret;
@@ -164,12 +164,12 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, iter, snapshot);
/*
* peek_max() doesn't have ideal semantics for extents:
*/
- k = bch2_btree_iter_peek_max(iter, end_pos);
+ k = bch2_btree_iter_peek_max(trans, iter, end_pos);
if (!k.k)
break;
@@ -230,7 +230,7 @@ static int truncate_set_isize(struct btree_trans *trans,
u64 new_i_size,
bool warn)
{
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bch_inode_unpacked inode_u;
int ret;
@@ -399,7 +399,7 @@ case LOGGED_OP_FINSERT_start:
if (ret)
goto err;
} else {
- bch2_btree_iter_set_pos(&iter, POS(inum.inum, src_offset));
+ bch2_btree_iter_set_pos(trans, &iter, POS(inum.inum, src_offset));
ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -425,12 +425,12 @@ case LOGGED_OP_FINSERT_shift_extents:
if (ret)
goto btree_err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
- bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot));
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
+ bch2_btree_iter_set_pos(trans, &iter, SPOS(inum.inum, pos, snapshot));
k = insert
- ? bch2_btree_iter_peek_prev_min(&iter, POS(inum.inum, 0))
- : bch2_btree_iter_peek_max(&iter, POS(inum.inum, U64_MAX));
+ ? bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum.inum, 0))
+ : bch2_btree_iter_peek_max(trans, &iter, POS(inum.inum, U64_MAX));
if ((ret = bkey_err(k)))
goto btree_err;
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index fd01e67b3e84..417bb0c7bbfa 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -394,7 +394,7 @@ static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
if (rbio->have_ioref) {
struct bch_dev *ca = bch2_dev_have_ref(rbio->c, rbio->pick.ptr.dev);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
}
if (rbio->split) {
@@ -909,7 +909,7 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
prt_printf(&buf, "memory gen: %u", gen);
- ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+ ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(trans, &iter)));
if (!ret) {
prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
@@ -1003,7 +1003,7 @@ retry_pick:
unlikely(dev_ptr_stale(ca, &pick.ptr))) {
read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
bch2_mark_io_failure(failed, &pick, false);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
goto retry_pick;
}
@@ -1036,7 +1036,7 @@ retry_pick:
*/
if (pick.crc.compressed_size > u->op.wbio.bio.bi_iter.bi_size) {
if (ca)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
rbio->ret = -BCH_ERR_data_read_buffer_too_small;
goto out_read_done;
}
@@ -1285,12 +1285,12 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- bch2_btree_iter_set_pos(&iter,
+ bch2_btree_iter_set_pos(trans, &iter,
POS(inum.inum, bvec_iter.bi_sector));
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 07b55839768e..a418fa62f09d 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -168,9 +168,9 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
*i_sectors_delta = 0;
*disk_sectors_delta = 0;
- bch2_trans_copy_iter(&iter, extent_iter);
+ bch2_trans_copy_iter(trans, &iter, extent_iter);
- for_each_btree_key_max_continue_norestart(iter,
+ for_each_btree_key_max_continue_norestart(trans, iter,
new->k.p, BTREE_ITER_slots, old, ret) {
s64 sectors = min(new->k.p.offset, old.k->p.offset) -
max(bkey_start_offset(&new->k),
@@ -292,7 +292,7 @@ int bch2_extent_update(struct btree_trans *trans,
* path already traversed at iter->pos because
* bch2_trans_extent_update() will use it to attempt extent merging
*/
- ret = __bch2_btree_iter_traverse(iter);
+ ret = __bch2_btree_iter_traverse(trans, iter);
if (ret)
return ret;
@@ -337,7 +337,7 @@ int bch2_extent_update(struct btree_trans *trans,
if (i_sectors_delta_total)
*i_sectors_delta_total += i_sectors_delta;
- bch2_btree_iter_set_pos(iter, next_pos);
+ bch2_btree_iter_set_pos(trans, iter, next_pos);
return 0;
}
@@ -445,6 +445,11 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
BUG_ON(c->opts.nochanges);
bkey_for_each_ptr(ptrs, ptr) {
+ /*
+ * XXX: btree writes should be using io_ref[WRITE], but we
+ * aren't retrying failed btree writes yet (due to device
+ * removal/ro):
+ */
struct bch_dev *ca = nocow
? bch2_dev_have_ref(c, ptr->dev)
: bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
@@ -697,12 +702,19 @@ static void bch2_write_endio(struct bio *bio)
bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
wbio->submit_time, !bio->bi_status);
- if (bio->bi_status) {
- bch_err_inum_offset_ratelimited(ca,
- op->pos.inode,
- wbio->inode_offset << 9,
- "data write error: %s",
- bch2_blk_status_to_str(bio->bi_status));
+ if (unlikely(bio->bi_status)) {
+ if (ca)
+ bch_err_inum_offset_ratelimited(ca,
+ op->pos.inode,
+ wbio->inode_offset << 9,
+ "data write error: %s",
+ bch2_blk_status_to_str(bio->bi_status));
+ else
+ bch_err_inum_offset_ratelimited(c,
+ op->pos.inode,
+ wbio->inode_offset << 9,
+ "data write error: %s",
+ bch2_blk_status_to_str(bio->bi_status));
set_bit(wbio->dev, op->failed.d);
op->flags |= BCH_WRITE_io_error;
}
@@ -715,7 +727,7 @@ static void bch2_write_endio(struct bio *bio)
}
if (wbio->have_ioref)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
if (wbio->bounce)
bch2_bio_free_pages_pool(c, bio);
@@ -1293,7 +1305,7 @@ retry:
if (ret)
break;
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
break;
@@ -1377,7 +1389,7 @@ retry:
bch2_keylist_push(&op->insert_keys);
if (op->flags & BCH_WRITE_submitted)
break;
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
}
out:
bch2_trans_iter_exit(trans, &iter);
@@ -1414,7 +1426,7 @@ err:
return;
err_get_ioref:
darray_for_each(buckets, i)
- percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref);
+ percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE]);
/* Fall back to COW path: */
goto out;
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 8a36d5536668..d8f74b6d0a75 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1315,7 +1315,7 @@ int bch2_fs_journal_alloc(struct bch_fs *c)
int ret = bch2_dev_journal_alloc(ca, true);
if (ret) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return ret;
}
}
@@ -1404,6 +1404,14 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
nr = cur_seq - last_seq;
+ /*
+ * Extra fudge factor, in case we crashed when the journal pin fifo was
+ * nearly or completely full. We'll need to be able to open additional
+ * journal entries (at least a few) in order for journal replay to get
+ * going:
+ */
+ nr += nr / 4;
+
if (nr + 1 > j->pin.size) {
free_fifo(&j->pin);
init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
@@ -1461,11 +1469,9 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
j->reservations.idx = journal_cur_seq(j);
c->last_bucket_seq_cleanup = journal_cur_seq(j);
-
- bch2_journal_space_available(j);
spin_unlock(&j->lock);
- return bch2_journal_reclaim_start(j);
+ return 0;
}
/* init/exit: */
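
The 25% fudge gives journal replay spare pin-fifo slots if the machine went down with the fifo nearly full; it only changes the allocation when it pushes the size past the next power of two. Worked example with illustrative numbers:

	u64 nr = cur_seq - last_seq;	/* say 4000 journal entries need pins */
	nr += nr / 4;			/* 4000 + 1000 = 5000 */
	/* init_fifo() rounds 5000 + 1 up to 8192 slots; without the fudge it would be 4096 */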
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 2debc213e47c..1b7961f4f609 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1218,7 +1218,7 @@ static CLOSURE_CALLBACK(bch2_journal_read_device)
out:
bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
kvfree(buf.data);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
closure_return(cl);
return;
err:
@@ -1253,7 +1253,7 @@ int bch2_journal_read(struct bch_fs *c,
if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
ca->mi.state == BCH_MEMBER_STATE_ro) &&
- percpu_ref_tryget(&ca->io_ref))
+ percpu_ref_tryget(&ca->io_ref[READ]))
closure_call(&ca->journal.read,
bch2_journal_read_device,
system_unbound_wq,
@@ -1768,7 +1768,7 @@ static void journal_write_endio(struct bio *bio)
}
closure_put(&w->io);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
}
static CLOSURE_CALLBACK(journal_write_submit)
@@ -1843,7 +1843,7 @@ static CLOSURE_CALLBACK(journal_write_preflush)
if (w->separate_flush) {
for_each_rw_member(c, ca) {
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[WRITE]);
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio[w->idx]->bio;
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index 57ad662871ba..90dcf80bd64a 100644
--- a/fs/bcachefs/migrate.c
+++ b/fs/bcachefs/migrate.c
@@ -130,7 +130,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c,
retry:
ret = 0;
while (bch2_trans_begin(trans),
- (b = bch2_btree_iter_peek_node(&iter)) &&
+ (b = bch2_btree_iter_peek_node(trans, &iter)) &&
!(ret = PTR_ERR_OR_ZERO(b))) {
bch2_progress_update_iter(trans, progress, &iter, "dropping metadata");
@@ -154,7 +154,7 @@ retry:
if (ret)
break;
next:
- bch2_btree_iter_next_node(&iter);
+ bch2_btree_iter_next_node(trans, &iter);
}
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 5d41260e10da..fc396b9fa754 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -545,7 +545,7 @@ static struct bkey_s_c bch2_lookup_indirect_extent_for_move(struct btree_trans *
BTREE_ID_reflink, reflink_pos,
BTREE_ITER_not_extents);
- struct bkey_s_c k = bch2_btree_iter_peek(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek(trans, iter);
if (!k.k || bkey_err(k)) {
bch2_trans_iter_exit(trans, iter);
return k;
@@ -603,7 +603,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek(trans, &iter);
if (!k.k)
break;
@@ -681,7 +681,7 @@ next:
if (ctxt->stats)
atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
next_nondata:
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
}
bch2_trans_iter_exit(trans, &reflink_iter);
@@ -794,7 +794,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&bp_iter);
+ k = bch2_btree_iter_peek(trans, &bp_iter);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
@@ -876,7 +876,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
if (ctxt->stats)
atomic64_add(sectors, &ctxt->stats->sectors_seen);
next:
- bch2_btree_iter_advance(&bp_iter);
+ bch2_btree_iter_advance(trans, &bp_iter);
}
err:
bch2_trans_iter_exit(trans, &bp_iter);
@@ -991,7 +991,7 @@ static int bch2_move_btree(struct bch_fs *c,
retry:
ret = 0;
while (bch2_trans_begin(trans),
- (b = bch2_btree_iter_peek_node(&iter)) &&
+ (b = bch2_btree_iter_peek_node(trans, &iter)) &&
!(ret = PTR_ERR_OR_ZERO(b))) {
if (kthread && kthread_should_stop())
break;
@@ -1011,7 +1011,7 @@ retry:
if (ret)
break;
next:
- bch2_btree_iter_next_node(&iter);
+ bch2_btree_iter_next_node(trans, &iter);
}
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 5126c870ce5b..159410c50861 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -280,7 +280,11 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
s64 wait = S64_MAX, fragmented_allowed, fragmented;
for_each_rw_member(c, ca) {
- struct bch_dev_usage usage = bch2_dev_usage_read(ca);
+ struct bch_dev_usage_full usage_full = bch2_dev_usage_full_read(ca);
+ struct bch_dev_usage usage;
+
+ for (unsigned i = 0; i < BCH_DATA_NR; i++)
+ usage.buckets[i] = usage_full.d[i].buckets;
fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
ca->mi.bucket_size) >> 1);
@@ -288,7 +292,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
for (unsigned i = 0; i < BCH_DATA_NR; i++)
if (data_type_movable(i))
- fragmented += usage.d[i].fragmented;
+ fragmented += usage_full.d[i].fragmented;
wait = min(wait, max(0LL, fragmented_allowed - fragmented));
}
diff --git a/fs/bcachefs/namei.c b/fs/bcachefs/namei.c
index ee7251709fb9..0d65ea96f7a2 100644
--- a/fs/bcachefs/namei.c
+++ b/fs/bcachefs/namei.c
@@ -28,8 +28,8 @@ int bch2_create_trans(struct btree_trans *trans,
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter dir_iter = {};
+ struct btree_iter inode_iter = {};
subvol_inum new_inum = dir;
u64 now = bch2_current_time(c);
u64 cpu = raw_smp_processor_id();
@@ -127,8 +127,8 @@ int bch2_create_trans(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&dir_iter, dir_snapshot);
- ret = bch2_btree_iter_traverse(&dir_iter);
+ bch2_btree_iter_set_snapshot(trans, &dir_iter, dir_snapshot);
+ ret = bch2_btree_iter_traverse(trans, &dir_iter);
if (ret)
goto err;
}
@@ -177,9 +177,9 @@ int bch2_create_trans(struct btree_trans *trans,
new_inode->bi_depth = dir_u->bi_depth + 1;
inode_iter.flags &= ~BTREE_ITER_all_snapshots;
- bch2_btree_iter_set_snapshot(&inode_iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &inode_iter, snapshot);
- ret = bch2_btree_iter_traverse(&inode_iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &inode_iter) ?:
bch2_inode_write(trans, &inode_iter, new_inode);
err:
bch2_trans_iter_exit(trans, &inode_iter);
@@ -193,8 +193,8 @@ int bch2_link_trans(struct btree_trans *trans,
const struct qstr *name)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter dir_iter = {};
+ struct btree_iter inode_iter = {};
struct bch_hash_info dir_hash;
u64 now = bch2_current_time(c);
u64 dir_offset = 0;
@@ -253,9 +253,9 @@ int bch2_unlink_trans(struct btree_trans *trans,
bool deleting_subvol)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter dirent_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter dir_iter = {};
+ struct btree_iter dirent_iter = {};
+ struct btree_iter inode_iter = {};
struct bch_hash_info dir_hash;
subvol_inum inum;
u64 now = bch2_current_time(c);
@@ -301,7 +301,7 @@ int bch2_unlink_trans(struct btree_trans *trans,
if (ret)
goto err;
- k = bch2_btree_iter_peek_slot(&dirent_iter);
+ k = bch2_btree_iter_peek_slot(trans, &dirent_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -310,8 +310,8 @@ int bch2_unlink_trans(struct btree_trans *trans,
* If we're deleting a subvolume, we need to really delete the
* dirent, not just emit a whiteout in the current snapshot:
*/
- bch2_btree_iter_set_snapshot(&dirent_iter, k.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&dirent_iter);
+ bch2_btree_iter_set_snapshot(trans, &dirent_iter, k.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(trans, &dirent_iter);
if (ret)
goto err;
} else {
@@ -390,10 +390,10 @@ int bch2_rename_trans(struct btree_trans *trans,
enum bch_rename_mode mode)
{
struct bch_fs *c = trans->c;
- struct btree_iter src_dir_iter = { NULL };
- struct btree_iter dst_dir_iter = { NULL };
- struct btree_iter src_inode_iter = { NULL };
- struct btree_iter dst_inode_iter = { NULL };
+ struct btree_iter src_dir_iter = {};
+ struct btree_iter dst_dir_iter = {};
+ struct btree_iter src_inode_iter = {};
+ struct btree_iter dst_inode_iter = {};
struct bch_hash_info src_hash, dst_hash;
subvol_inum src_inum, dst_inum;
u64 src_offset, dst_offset;
@@ -666,7 +666,7 @@ static int bch2_check_dirent_inode_dirent(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
- struct btree_iter bp_iter = { NULL };
+ struct btree_iter bp_iter = {};
int ret = 0;
if (inode_points_to_dirent(target, d))
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
index 8b857fc33244..3d4755d73af7 100644
--- a/fs/bcachefs/quota.c
+++ b/fs/bcachefs/quota.c
@@ -516,7 +516,7 @@ static int bch2_fs_quota_read_inode(struct btree_trans *trans,
bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
KEY_TYPE_QUOTA_NOCHECK);
advance:
- bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+ bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos));
return 0;
}
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index b9bde04b66c0..c63fa53f30d2 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -233,7 +233,7 @@ int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum)
bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -281,7 +281,7 @@ static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum,
bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -301,7 +301,7 @@ static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
struct btree_iter *work_iter)
{
return !kthread_should_stop()
- ? bch2_btree_iter_peek(work_iter)
+ ? bch2_btree_iter_peek(trans, work_iter)
: bkey_s_c_null;
}
@@ -335,7 +335,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
work_pos,
BTREE_ITER_all_snapshots);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(extent_iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, extent_iter);
if (bkey_err(k))
return k;
@@ -511,7 +511,7 @@ static int do_rebalance(struct moving_context *ctxt)
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct bch_fs_rebalance *r = &c->rebalance;
- struct btree_iter rebalance_work_iter, extent_iter = { NULL };
+ struct btree_iter rebalance_work_iter, extent_iter = {};
struct bkey_s_c k;
int ret = 0;
@@ -552,7 +552,7 @@ static int do_rebalance(struct moving_context *ctxt)
if (ret)
break;
- bch2_btree_iter_advance(&rebalance_work_iter);
+ bch2_btree_iter_advance(trans, &rebalance_work_iter);
}
bch2_trans_iter_exit(trans, &extent_iter);
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 266c5770c824..79fd18a5a07c 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -198,7 +198,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, k->level,
BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(&iter);
+ int ret = bch2_btree_iter_traverse(trans, &iter);
if (ret)
goto out;
@@ -261,7 +261,7 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, k->level,
iter_flags);
- ret = bch2_btree_iter_traverse(&iter);
+ ret = bch2_btree_iter_traverse(trans, &iter);
if (ret)
goto out;
@@ -270,7 +270,7 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
bch2_trans_iter_exit(trans, &iter);
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, 0, iter_flags);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_increase_depth(trans, iter.path, 0) ?:
-BCH_ERR_transaction_restart_nested;
goto out;
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index ee23f1f93acc..710178e3da4c 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -495,7 +495,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
bool reflink_p_may_update_opts_field)
{
struct bch_fs *c = trans->c;
- struct btree_iter reflink_iter = { NULL };
+ struct btree_iter reflink_iter = {};
struct bkey_s_c k;
struct bkey_i *r_v;
struct bkey_i_reflink_p *r_p;
@@ -507,7 +507,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_prev(&reflink_iter);
+ k = bch2_btree_iter_peek_prev(trans, &reflink_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -569,12 +569,13 @@ err:
return ret;
}
-static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
+static struct bkey_s_c get_next_src(struct btree_trans *trans,
+ struct btree_iter *iter, struct bpos end)
{
struct bkey_s_c k;
int ret;
- for_each_btree_key_max_continue_norestart(*iter, end, 0, k, ret) {
+ for_each_btree_key_max_continue_norestart(trans, *iter, end, 0, k, ret) {
if (bkey_extent_is_unwritten(k))
continue;
@@ -583,7 +584,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
}
if (bkey_ge(iter->pos, end))
- bch2_btree_iter_set_pos(iter, end);
+ bch2_btree_iter_set_pos(trans, iter, end);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
@@ -647,27 +648,27 @@ s64 bch2_remap_range(struct bch_fs *c,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(&src_iter, src_snapshot);
+ bch2_btree_iter_set_snapshot(trans, &src_iter, src_snapshot);
ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol,
&dst_snapshot);
if (ret)
continue;
- bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);
+ bch2_btree_iter_set_snapshot(trans, &dst_iter, dst_snapshot);
if (dst_inum.inum < src_inum.inum) {
/* Avoid some lock cycle transaction restarts */
- ret = bch2_btree_iter_traverse(&dst_iter);
+ ret = bch2_btree_iter_traverse(trans, &dst_iter);
if (ret)
continue;
}
dst_done = dst_iter.pos.offset - dst_start.offset;
src_want = POS(src_start.inode, src_start.offset + dst_done);
- bch2_btree_iter_set_pos(&src_iter, src_want);
+ bch2_btree_iter_set_pos(trans, &src_iter, src_want);
- src_k = get_next_src(&src_iter, src_end);
+ src_k = get_next_src(trans, &src_iter, src_end);
ret = bkey_err(src_k);
if (ret)
continue;
@@ -738,7 +739,7 @@ s64 bch2_remap_range(struct bch_fs *c,
do {
struct bch_inode_unpacked inode_u;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
bch2_trans_begin(trans);
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index 38261638a611..06bb41a3f360 100644
--- a/fs/bcachefs/sb-members.h
+++ b/fs/bcachefs/sb-members.h
@@ -20,7 +20,7 @@ struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
- return !percpu_ref_is_zero(&ca->io_ref);
+ return !percpu_ref_is_zero(&ca->io_ref[READ]);
}
static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *, unsigned);
@@ -156,33 +156,34 @@ static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
struct bch_dev *ca,
- unsigned state_mask)
+ unsigned state_mask,
+ int rw)
{
rcu_read_lock();
if (ca)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
while ((ca = __bch2_next_dev(c, ca, NULL)) &&
(!((1 << ca->mi.state) & state_mask) ||
- !percpu_ref_tryget(&ca->io_ref)))
+ !percpu_ref_tryget(&ca->io_ref[rw])))
;
rcu_read_unlock();
return ca;
}
-#define __for_each_online_member(_c, _ca, state_mask) \
+#define __for_each_online_member(_c, _ca, state_mask, rw) \
for (struct bch_dev *_ca = NULL; \
- (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)
+ (_ca = bch2_get_next_online_dev(_c, _ca, state_mask, rw));)
#define for_each_online_member(c, ca) \
- __for_each_online_member(c, ca, ~0)
+ __for_each_online_member(c, ca, ~0, READ)
#define for_each_rw_member(c, ca) \
- __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))
+ __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), WRITE)
#define for_each_readable_member(c, ca) \
- __for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
+ __for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro), READ)
static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
@@ -287,7 +288,7 @@ static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev,
rcu_read_lock();
struct bch_dev *ca = bch2_dev_rcu(c, dev);
- if (ca && !percpu_ref_tryget(&ca->io_ref))
+ if (ca && !percpu_ref_tryget(&ca->io_ref[rw]))
ca = NULL;
rcu_read_unlock();
@@ -297,7 +298,7 @@ static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev,
return ca;
if (ca)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
return NULL;
}
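A hedged usage sketch of the per-direction ref taken by bch2_dev_get_ioref() above (example_submit_write() and its error handling are made up; the put in the completion path matches what bch2_write_endio() now does):

/* Illustrative only: take io_ref[WRITE] before submitting, drop it in endio. */
static void example_submit_write(struct bch_fs *c, unsigned dev, struct bio *bio)
{
	struct bch_dev *ca = bch2_dev_get_ioref(c, dev, WRITE);

	if (!ca) {
		bio->bi_status = BLK_STS_IOERR;	/* device gone or not writable */
		bio_endio(bio);
		return;
	}

	bio_set_dev(bio, ca->disk_sb.bdev);
	submit_bio(bio);
	/* completion handler: percpu_ref_put(&ca->io_ref[WRITE]); */
}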
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 0c65065b08ec..b7de29aed839 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -843,9 +843,6 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id)
{
struct bch_fs *c = trans->c;
- if (bch2_snapshot_exists(c, id))
- return 0;
-
/* Do we need to reconstruct the snapshot_tree entry as well? */
struct btree_iter iter;
struct bkey_s_c k;
@@ -1074,9 +1071,9 @@ static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
- struct btree_iter c_iter = (struct btree_iter) { NULL };
- struct btree_iter tree_iter = (struct btree_iter) { NULL };
+ struct btree_iter iter, p_iter = {};
+ struct btree_iter c_iter = {};
+ struct btree_iter tree_iter = {};
struct bkey_s_c_snapshot s;
u32 parent_id, child_id;
unsigned i;
@@ -1193,13 +1190,13 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
POS_MIN, BTREE_ITER_intent);
- k = bch2_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
for (i = 0; i < nr_snapids; i++) {
- k = bch2_btree_iter_prev_slot(&iter);
+ k = bch2_btree_iter_prev_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c
index 602afca2f5ef..a90bf7b8a2b4 100644
--- a/fs/bcachefs/str_hash.c
+++ b/fs/bcachefs/str_hash.c
@@ -195,7 +195,7 @@ int __bch2_str_hash_check_key(struct btree_trans *trans,
struct btree_iter *k_iter, struct bkey_s_c hash_k)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct printbuf buf = PRINTBUF;
struct bkey_s_c k;
int ret = 0;
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 575ad1e03904..09a354a26c3b 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -231,11 +231,11 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
- bch2_trans_copy_iter(&iter, start);
+ bch2_trans_copy_iter(trans, &iter, start);
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
- for_each_btree_key_continue_norestart(iter, BTREE_ITER_slots, k, ret) {
+ for_each_btree_key_continue_norestart(trans, iter, BTREE_ITER_slots, k, ret) {
if (k.k->type != desc.key_type &&
k.k->type != KEY_TYPE_hash_whiteout)
break;
@@ -280,7 +280,7 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
}
if (!slot.path && !(flags & STR_HASH_must_replace))
- bch2_trans_copy_iter(&slot, iter);
+ bch2_trans_copy_iter(trans, &slot, iter);
if (k.k->type != KEY_TYPE_hash_whiteout)
goto not_found;
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index cd0d8e5e44e7..5537283d0bea 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -275,7 +275,7 @@ int bch2_subvol_has_children(struct btree_trans *trans, u32 subvol)
struct btree_iter iter;
bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolume_children, POS(subvol, 0), 0);
- struct bkey_s_c k = bch2_btree_iter_peek(&iter);
+ struct bkey_s_c k = bch2_btree_iter_peek(trans, &iter);
bch2_trans_iter_exit(trans, &iter);
return bkey_err(k) ?: k.k && k.k->p.inode == subvol
@@ -574,7 +574,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
bool ro)
{
struct bch_fs *c = trans->c;
- struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
+ struct btree_iter dst_iter, src_iter = {};
struct bkey_i_subvolume *new_subvol = NULL;
struct bkey_i_subvolume *src_subvol = NULL;
u32 parent = 0, new_nodes[2], snapshot_subvols[2];
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index 910f6196700e..f640c1e3d639 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -33,16 +33,16 @@ int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
int bch2_subvol_is_ro(struct bch_fs *, u32);
static inline struct bkey_s_c
-bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos end,
- u32 subvolid, unsigned flags)
+bch2_btree_iter_peek_in_subvolume_max_type(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end, u32 subvolid, unsigned flags)
{
u32 snapshot;
- int ret = bch2_subvolume_get_snapshot(iter->trans, subvolid, &snapshot);
+ int ret = bch2_subvolume_get_snapshot(trans, subvolid, &snapshot);
if (ret)
return bkey_s_c_err(ret);
- bch2_btree_iter_set_snapshot(iter, snapshot);
- return bch2_btree_iter_peek_max_type(iter, end, flags);
+ bch2_btree_iter_set_snapshot(trans, iter, snapshot);
+ return bch2_btree_iter_peek_max_type(trans, iter, end, flags);
}
#define for_each_btree_key_in_subvolume_max_continue(_trans, _iter, \
@@ -53,14 +53,14 @@ bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_in_subvolume_max_type(&(_iter), \
+ (_k) = bch2_btree_iter_peek_in_subvolume_max_type(trans, &(_iter),\
_end, _subvolid, (_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter))); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 572b06bfa0b8..e27422b6d9c6 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -248,7 +248,7 @@ struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb,
struct bch_sb_handle *dev_sb = &ca->disk_sb;
if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return NULL;
}
}
@@ -945,7 +945,7 @@ static void write_super_endio(struct bio *bio)
}
closure_put(&ca->fs->sb_write);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
}
static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
@@ -963,7 +963,7 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb], bio_sectors(bio));
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
closure_bio_submit(bio, &c->sb_write);
}
@@ -989,7 +989,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
bio_sectors(bio));
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
closure_bio_submit(bio, &c->sb_write);
}
@@ -1014,13 +1014,20 @@ int bch2_write_super(struct bch_fs *c)
closure_init_stack(cl);
memset(&sb_written, 0, sizeof(sb_written));
+ /*
+ * Note: we do writes to RO devices here, and we might want to change
+ * that in the future.
+ *
+ * For now, we expect to be able to call write_super() when we're not
+ * yet RW:
+ */
for_each_online_member(c, ca) {
ret = darray_push(&online_devices, ca);
if (bch2_fs_fatal_err_on(ret, c, "%s: error allocating online devices", __func__)) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
goto out;
}
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
}
/* Make sure we're using the new magic numbers: */
@@ -1186,7 +1193,7 @@ out:
/* Make new options visible after they're persistent: */
bch2_sb_update(c);
darray_for_each(online_devices, ca)
- percpu_ref_put(&(*ca)->io_ref);
+ percpu_ref_put(&(*ca)->io_ref[READ]);
darray_exit(&online_devices);
printbuf_exit(&err);
return ret;
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 20208f3c5d8b..a58edde43bee 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -185,6 +185,7 @@ static void bch2_dev_unlink(struct bch_dev *);
static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
+static void bch2_dev_io_ref_stop(struct bch_dev *, int);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
struct bch_fs *bch2_dev_to_fs(dev_t dev)
@@ -294,8 +295,10 @@ static void __bch2_fs_read_only(struct bch_fs *c)
/*
* After stopping journal:
*/
- for_each_member_device(c, ca)
+ for_each_member_device(c, ca) {
+ bch2_dev_io_ref_stop(ca, WRITE);
bch2_dev_allocator_remove(c, ca);
+ }
}
#ifndef BCH_WRITE_REF_DEBUG
@@ -465,10 +468,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
if (ret)
goto err;
- ret = bch2_fs_mark_dirty(c);
- if (ret)
- goto err;
-
clear_bit(BCH_FS_clean_shutdown, &c->flags);
/*
@@ -480,10 +479,24 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
set_bit(JOURNAL_need_flush_write, &c->journal.flags);
set_bit(JOURNAL_running, &c->journal.flags);
- for_each_rw_member(c, ca)
+ __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) {
bch2_dev_allocator_add(c, ca);
+ percpu_ref_reinit(&ca->io_ref[WRITE]);
+ }
bch2_recalc_capacity(c);
+ ret = bch2_fs_mark_dirty(c);
+ if (ret)
+ goto err;
+
+ spin_lock(&c->journal.lock);
+ bch2_journal_space_available(&c->journal);
+ spin_unlock(&c->journal.lock);
+
+ ret = bch2_journal_reclaim_start(&c->journal);
+ if (ret)
+ goto err;
+
set_bit(BCH_FS_rw, &c->flags);
set_bit(BCH_FS_was_rw, &c->flags);
@@ -495,11 +508,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
atomic_long_inc(&c->writes[i]);
}
#endif
-
- ret = bch2_journal_reclaim_start(&c->journal);
- if (ret)
- goto err;
-
if (!early) {
ret = bch2_fs_read_write_late(c);
if (ret)
@@ -675,6 +683,7 @@ void bch2_fs_free(struct bch_fs *c)
if (ca) {
EBUG_ON(atomic_long_read(&ca->ref) != 1);
+ bch2_dev_io_ref_stop(ca, READ);
bch2_free_super(&ca->disk_sb);
bch2_dev_free(ca);
}
@@ -1199,6 +1208,15 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs,
/* Device startup/shutdown: */
+static void bch2_dev_io_ref_stop(struct bch_dev *ca, int rw)
+{
+ if (!percpu_ref_is_zero(&ca->io_ref[rw])) {
+ reinit_completion(&ca->io_ref_completion[rw]);
+ percpu_ref_kill(&ca->io_ref[rw]);
+ wait_for_completion(&ca->io_ref_completion[rw]);
+ }
+}
+
static void bch2_dev_release(struct kobject *kobj)
{
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
@@ -1208,6 +1226,9 @@ static void bch2_dev_release(struct kobject *kobj)
static void bch2_dev_free(struct bch_dev *ca)
{
+ WARN_ON(!percpu_ref_is_zero(&ca->io_ref[WRITE]));
+ WARN_ON(!percpu_ref_is_zero(&ca->io_ref[READ]));
+
cancel_work_sync(&ca->io_error_work);
bch2_dev_unlink(ca);
@@ -1226,7 +1247,8 @@ static void bch2_dev_free(struct bch_dev *ca)
bch2_time_stats_quantiles_exit(&ca->io_latency[WRITE]);
bch2_time_stats_quantiles_exit(&ca->io_latency[READ]);
- percpu_ref_exit(&ca->io_ref);
+ percpu_ref_exit(&ca->io_ref[WRITE]);
+ percpu_ref_exit(&ca->io_ref[READ]);
#ifndef CONFIG_BCACHEFS_DEBUG
percpu_ref_exit(&ca->ref);
#endif
@@ -1238,14 +1260,12 @@ static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
lockdep_assert_held(&c->state_lock);
- if (percpu_ref_is_zero(&ca->io_ref))
+ if (percpu_ref_is_zero(&ca->io_ref[READ]))
return;
__bch2_dev_read_only(c, ca);
- reinit_completion(&ca->io_ref_completion);
- percpu_ref_kill(&ca->io_ref);
- wait_for_completion(&ca->io_ref_completion);
+ bch2_dev_io_ref_stop(ca, READ);
bch2_dev_unlink(ca);
@@ -1262,11 +1282,18 @@ static void bch2_dev_ref_complete(struct percpu_ref *ref)
}
#endif
-static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
+static void bch2_dev_io_ref_read_complete(struct percpu_ref *ref)
+{
+ struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref[READ]);
+
+ complete(&ca->io_ref_completion[READ]);
+}
+
+static void bch2_dev_io_ref_write_complete(struct percpu_ref *ref)
{
- struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
+ struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref[WRITE]);
- complete(&ca->io_ref_completion);
+ complete(&ca->io_ref_completion[WRITE]);
}
static void bch2_dev_unlink(struct bch_dev *ca)
@@ -1330,7 +1357,8 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
kobject_init(&ca->kobj, &bch2_dev_ktype);
init_completion(&ca->ref_completion);
- init_completion(&ca->io_ref_completion);
+ init_completion(&ca->io_ref_completion[READ]);
+ init_completion(&ca->io_ref_completion[WRITE]);
INIT_WORK(&ca->io_error_work, bch2_io_error_work);
@@ -1356,7 +1384,9 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
bch2_dev_allocator_background_init(ca);
- if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
+ if (percpu_ref_init(&ca->io_ref[READ], bch2_dev_io_ref_read_complete,
+ PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
+ percpu_ref_init(&ca->io_ref[WRITE], bch2_dev_io_ref_write_complete,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
!(ca->sb_read_scratch = kmalloc(BCH_SB_READ_SCRATCH_BUF_SIZE, GFP_KERNEL)) ||
bch2_dev_buckets_alloc(c, ca) ||
@@ -1419,7 +1449,8 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
return -BCH_ERR_device_size_too_small;
}
- BUG_ON(!percpu_ref_is_zero(&ca->io_ref));
+ BUG_ON(!percpu_ref_is_zero(&ca->io_ref[READ]));
+ BUG_ON(!percpu_ref_is_zero(&ca->io_ref[WRITE]));
ret = bch2_dev_journal_init(ca, sb->sb);
if (ret)
@@ -1438,7 +1469,7 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
ca->dev = ca->disk_sb.bdev->bd_dev;
- percpu_ref_reinit(&ca->io_ref);
+ percpu_ref_reinit(&ca->io_ref[READ]);
return 0;
}
@@ -1568,6 +1599,8 @@ static bool bch2_fs_may_start(struct bch_fs *c)
static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
+ bch2_dev_io_ref_stop(ca, WRITE);
+
/*
* The allocator thread itself allocates btree nodes, so stop it first:
*/
@@ -1584,6 +1617,10 @@ static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
+
+ if (percpu_ref_is_zero(&ca->io_ref[WRITE]))
+ percpu_ref_reinit(&ca->io_ref[WRITE]);
+
bch2_dev_do_discards(ca);
}
@@ -1731,7 +1768,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
return 0;
err:
if (ca->mi.state == BCH_MEMBER_STATE_rw &&
- !percpu_ref_is_zero(&ca->io_ref))
+ !percpu_ref_is_zero(&ca->io_ref[READ]))
__bch2_dev_read_write(c, ca);
up_write(&c->state_lock);
return ret;
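The device RW/RO transitions touched above can be condensed into one illustrative helper (example_dev_set_rw() is hypothetical; the real logic lives in __bch2_dev_read_write(), __bch2_dev_read_only() and __bch2_fs_read_write()):

static void example_dev_set_rw(struct bch_fs *c, struct bch_dev *ca, bool rw)
{
	if (rw) {
		bch2_dev_allocator_add(c, ca);
		bch2_recalc_capacity(c);
		/* writes only become possible once io_ref[WRITE] is live again */
		if (percpu_ref_is_zero(&ca->io_ref[WRITE]))
			percpu_ref_reinit(&ca->io_ref[WRITE]);
	} else {
		/* kill io_ref[WRITE] and wait for in-flight writes first */
		bch2_dev_io_ref_stop(ca, WRITE);
		bch2_dev_allocator_remove(c, ca);
		bch2_recalc_capacity(c);
	}
}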
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index 6c6469814637..c265b102267a 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -43,7 +43,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
BTREE_ITER_intent);
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, &k.k_i, 0));
bch_err_msg(c, ret, "update error");
if (ret)
@@ -51,7 +51,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
pr_info("deleting once");
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error (first)");
if (ret)
@@ -59,7 +59,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
pr_info("deleting twice");
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error (second)");
if (ret)
@@ -84,7 +84,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
BTREE_ITER_intent);
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, &k.k_i, 0));
bch_err_msg(c, ret, "update error");
if (ret)
@@ -94,7 +94,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
bch2_journal_flush_all_pins(&c->journal);
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error");
if (ret)
@@ -349,10 +349,10 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
bch2_trans_iter_exit(trans, &iter);
@@ -369,10 +369,10 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
SPOS(0, 0, U32_MAX), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
bch2_trans_iter_exit(trans, &iter);
@@ -488,7 +488,7 @@ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
trans = bch2_trans_get(c);
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
SPOS(0, 0, snapid_lo), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k->p.snapshot != U32_MAX);
@@ -602,9 +602,9 @@ static int rand_lookup(struct bch_fs *c, u64 nr)
SPOS(0, 0, U32_MAX), 0);
for (i = 0; i < nr; i++) {
- bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
+ bch2_btree_iter_set_pos(trans, &iter, SPOS(0, test_rand(), U32_MAX));
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(trans, &iter)));
ret = bkey_err(k);
if (ret)
break;
@@ -623,9 +623,9 @@ static int rand_mixed_trans(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
- bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
+ bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, U32_MAX));
- k = bch2_btree_iter_peek(iter);
+ k = bch2_btree_iter_peek(trans, iter);
ret = bkey_err(k);
bch_err_msg(trans->c, ret, "lookup error");
if (ret)
@@ -672,7 +672,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX));
+ k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX));
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index f9667b944c0d..651da52b2cbc 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -168,7 +168,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
int type, int flags)
{
struct bch_fs *c = trans->c;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
int ret;
ret = bch2_subvol_is_ro_trans(trans, inum.subvol) ?:
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 83a60126de0f..14d0cc894000 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -128,10 +128,11 @@ retry:
ret = security_path_mkdir(&path, subdir, 0700);
if (ret < 0)
goto mkdir_error;
- subdir = ERR_PTR(cachefiles_inject_write_error());
- if (!IS_ERR(subdir))
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
subdir = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700);
- ret = PTR_ERR(subdir);
+ else
+ subdir = ERR_PTR(ret);
if (IS_ERR(subdir)) {
trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
cachefiles_trace_mkdir_error);
diff --git a/fs/exec.c b/fs/exec.c
index f45859ad13ac..5d1c0d2dc403 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1227,13 +1227,12 @@ int begin_new_exec(struct linux_binprm * bprm)
*/
bprm->point_of_no_return = true;
- /*
- * Make this the only thread in the thread group.
- */
+ /* Make this the only thread in the thread group */
retval = de_thread(me);
if (retval)
goto out;
-
+ /* see the comment in check_unsafe_exec() */
+ current->fs->in_exec = 0;
/*
* Cancel any io_uring activity across execve
*/
@@ -1495,6 +1494,8 @@ static void free_bprm(struct linux_binprm *bprm)
}
free_arg_pages(bprm);
if (bprm->cred) {
+ /* in case exec fails before de_thread() succeeds */
+ current->fs->in_exec = 0;
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
@@ -1616,6 +1617,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
* suid exec because the differently privileged task
* will be able to manipulate the current directory, etc.
* It would be nice to force an unshare instead...
+ *
+ * Otherwise we set fs->in_exec = 1 to deny clone(CLONE_FS)
+ * from another sub-thread until de_thread() succeeds; this
+ * state is protected by the cred_guard_mutex we hold.
*/
n_fs = 1;
spin_lock(&p->fs->lock);
@@ -1860,7 +1865,6 @@ static int bprm_execve(struct linux_binprm *bprm)
sched_mm_cid_after_execve(current);
/* execve succeeded */
- current->fs->in_exec = 0;
current->in_execve = 0;
rseq_execve(current);
user_events_execve(current);
@@ -1879,7 +1883,6 @@ out:
force_fatal_sig(SIGSEGV);
sched_mm_cid_after_execve(current);
- current->fs->in_exec = 0;
current->in_execve = 0;
return retval;
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index b5845c4846b8..128dd092916b 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -608,4 +608,5 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
}
EXPORT_SYMBOL_GPL(exportfs_decode_fh);
+MODULE_DESCRIPTION("Code mapping from inodes to file handles");
MODULE_LICENSE("GPL");
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 51e31df4c546..6dcbaa218b7a 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -32,6 +32,100 @@ MODULE_ALIAS("devname:fuse");
static struct kmem_cache *fuse_req_cachep;
+const unsigned long fuse_timeout_timer_freq =
+ secs_to_jiffies(FUSE_TIMEOUT_TIMER_FREQ);
+
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list)
+{
+ struct fuse_req *req;
+
+ req = list_first_entry_or_null(list, struct fuse_req, list);
+ if (!req)
+ return false;
+ return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout);
+}
+
+bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing)
+{
+ int i;
+
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ if (fuse_request_expired(fc, &processing[i]))
+ return true;
+
+ return false;
+}
+
+/*
+ * Check if any requests aren't being completed by the time the request timeout
+ * elapses. To do so, we:
+ * - check the fiq pending list
+ * - check the bg queue
+ * - check the fpq io and processing lists
+ *
+ * To make this fast, we only check against the head request on each list since
+ * these are generally queued in order of creation time (eg newer requests get
+ * queued to the tail). We might miss a few edge cases (eg requests transitioning
+ * between lists, re-sent requests at the head of the pending list having a
+ * later creation time than other requests on that list, etc.) but that is fine
+ * since if the request never gets fulfilled, it will eventually be caught.
+ */
+void fuse_check_timeout(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct fuse_conn *fc = container_of(dwork, struct fuse_conn,
+ timeout.work);
+ struct fuse_iqueue *fiq = &fc->iq;
+ struct fuse_dev *fud;
+ struct fuse_pqueue *fpq;
+ bool expired = false;
+
+ if (!atomic_read(&fc->num_waiting))
+ goto out;
+
+ spin_lock(&fiq->lock);
+ expired = fuse_request_expired(fc, &fiq->pending);
+ spin_unlock(&fiq->lock);
+ if (expired)
+ goto abort_conn;
+
+ spin_lock(&fc->bg_lock);
+ expired = fuse_request_expired(fc, &fc->bg_queue);
+ spin_unlock(&fc->bg_lock);
+ if (expired)
+ goto abort_conn;
+
+ spin_lock(&fc->lock);
+ if (!fc->connected) {
+ spin_unlock(&fc->lock);
+ return;
+ }
+ list_for_each_entry(fud, &fc->devices, entry) {
+ fpq = &fud->pq;
+ spin_lock(&fpq->lock);
+ if (fuse_request_expired(fc, &fpq->io) ||
+ fuse_fpq_processing_expired(fc, fpq->processing)) {
+ spin_unlock(&fpq->lock);
+ spin_unlock(&fc->lock);
+ goto abort_conn;
+ }
+
+ spin_unlock(&fpq->lock);
+ }
+ spin_unlock(&fc->lock);
+
+ if (fuse_uring_request_expired(fc))
+ goto abort_conn;
+
+out:
+ queue_delayed_work(system_wq, &fc->timeout.work,
+ fuse_timeout_timer_freq);
+ return;
+
+abort_conn:
+ fuse_abort_conn(fc);
+}
+
static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{
INIT_LIST_HEAD(&req->list);
@@ -40,6 +134,7 @@ static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
refcount_set(&req->count, 1);
__set_bit(FR_PENDING, &req->flags);
req->fm = fm;
+ req->create_time = jiffies;
}
static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
@@ -407,6 +502,24 @@ static int queue_interrupt(struct fuse_req *req)
return 0;
}
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock)
+{
+ spin_lock(lock);
+ if (test_bit(FR_PENDING, &req->flags)) {
+ /*
+ * FR_PENDING does not get cleared as the request will end
+ * up in destruction anyway.
+ */
+ list_del(&req->list);
+ spin_unlock(lock);
+ __fuse_put_request(req);
+ req->out.h.error = -EINTR;
+ return true;
+ }
+ spin_unlock(lock);
+ return false;
+}
+
static void request_wait_answer(struct fuse_req *req)
{
struct fuse_conn *fc = req->fm->fc;
@@ -428,22 +541,20 @@ static void request_wait_answer(struct fuse_req *req)
}
if (!test_bit(FR_FORCE, &req->flags)) {
+ bool removed;
+
/* Only fatal signals may interrupt this */
err = wait_event_killable(req->waitq,
test_bit(FR_FINISHED, &req->flags));
if (!err)
return;
- spin_lock(&fiq->lock);
- /* Request is not yet in userspace, bail out */
- if (test_bit(FR_PENDING, &req->flags)) {
- list_del(&req->list);
- spin_unlock(&fiq->lock);
- __fuse_put_request(req);
- req->out.h.error = -EINTR;
+ if (test_bit(FR_URING, &req->flags))
+ removed = fuse_uring_remove_pending_req(req);
+ else
+ removed = fuse_remove_pending_req(req, &fiq->lock);
+ if (removed)
return;
- }
- spin_unlock(&fiq->lock);
}
/*
@@ -1533,14 +1644,10 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_entry_out outarg;
- int err = -ENOMEM;
- char *buf;
+ int err;
+ char *buf = NULL;
struct qstr name;
- buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
- if (!buf)
- goto err;
-
err = -EINVAL;
if (size < sizeof(outarg))
goto err;
@@ -1550,13 +1657,18 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
goto err;
err = -ENAMETOOLONG;
- if (outarg.namelen > FUSE_NAME_MAX)
+ if (outarg.namelen > fc->name_max)
goto err;
err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
goto err;
+ err = -ENOMEM;
+ buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+ if (!buf)
+ goto err;
+
name.name = buf;
name.len = outarg.namelen;
err = fuse_copy_one(cs, buf, outarg.namelen + 1);
@@ -1581,14 +1693,10 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_delete_out outarg;
- int err = -ENOMEM;
- char *buf;
+ int err;
+ char *buf = NULL;
struct qstr name;
- buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
- if (!buf)
- goto err;
-
err = -EINVAL;
if (size < sizeof(outarg))
goto err;
@@ -1598,13 +1706,18 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
goto err;
err = -ENAMETOOLONG;
- if (outarg.namelen > FUSE_NAME_MAX)
+ if (outarg.namelen > fc->name_max)
goto err;
err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
goto err;
+ err = -ENOMEM;
+ buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+ if (!buf)
+ goto err;
+
name.name = buf;
name.len = outarg.namelen;
err = fuse_copy_one(cs, buf, outarg.namelen + 1);
@@ -2275,6 +2388,9 @@ void fuse_abort_conn(struct fuse_conn *fc)
LIST_HEAD(to_end);
unsigned int i;
+ if (fc->timeout.req_timeout)
+ cancel_delayed_work(&fc->timeout.work);
+
/* Background queuing checks fc->connected under bg_lock */
spin_lock(&fc->bg_lock);
fc->connected = 0;
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index 82bf458fa9db..accdce2977c5 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -140,6 +140,33 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
}
}
+bool fuse_uring_request_expired(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ int qid;
+
+ if (!ring)
+ return false;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ queue = READ_ONCE(ring->queues[qid]);
+ if (!queue)
+ continue;
+
+ spin_lock(&queue->lock);
+ if (fuse_request_expired(fc, &queue->fuse_req_queue) ||
+ fuse_request_expired(fc, &queue->fuse_req_bg_queue) ||
+ fuse_fpq_processing_expired(fc, queue->fpq.processing)) {
+ spin_unlock(&queue->lock);
+ return true;
+ }
+ spin_unlock(&queue->lock);
+ }
+
+ return false;
+}
+
void fuse_uring_destruct(struct fuse_conn *fc)
{
struct fuse_ring *ring = fc->ring;
@@ -211,7 +238,6 @@ static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc)
ring->nr_queues = nr_queues;
ring->fc = fc;
ring->max_payload_sz = max_payload_size;
- atomic_set(&ring->queue_refs, 0);
smp_store_release(&fc->ring, ring);
spin_unlock(&fc->lock);
@@ -726,8 +752,6 @@ static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
struct fuse_req *req)
{
struct fuse_ring_queue *queue = ent->queue;
- struct fuse_conn *fc = req->fm->fc;
- struct fuse_iqueue *fiq = &fc->iq;
lockdep_assert_held(&queue->lock);
@@ -737,9 +761,7 @@ static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
ent->state);
}
- spin_lock(&fiq->lock);
clear_bit(FR_PENDING, &req->flags);
- spin_unlock(&fiq->lock);
ent->fuse_req = req;
ent->state = FRRS_FUSE_REQ;
list_move(&ent->list, &queue->ent_w_req_queue);
@@ -1238,6 +1260,8 @@ void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
if (unlikely(queue->stopped))
goto err_unlock;
+ set_bit(FR_URING, &req->flags);
+ req->ring_queue = queue;
ent = list_first_entry_or_null(&queue->ent_avail_queue,
struct fuse_ring_ent, list);
if (ent)
@@ -1276,6 +1300,8 @@ bool fuse_uring_queue_bq_req(struct fuse_req *req)
return false;
}
+ set_bit(FR_URING, &req->flags);
+ req->ring_queue = queue;
list_add_tail(&req->list, &queue->fuse_req_bg_queue);
ent = list_first_entry_or_null(&queue->ent_avail_queue,
@@ -1306,6 +1332,13 @@ bool fuse_uring_queue_bq_req(struct fuse_req *req)
return true;
}
+bool fuse_uring_remove_pending_req(struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = req->ring_queue;
+
+ return fuse_remove_pending_req(req, &queue->lock);
+}
+
static const struct fuse_iqueue_ops fuse_io_uring_ops = {
/* should be send over io-uring as enhancement */
.send_forget = fuse_dev_queue_forget,
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
index 2102b3d0c1ae..51a563922ce1 100644
--- a/fs/fuse/dev_uring_i.h
+++ b/fs/fuse/dev_uring_i.h
@@ -142,6 +142,8 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring);
int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
bool fuse_uring_queue_bq_req(struct fuse_req *req);
+bool fuse_uring_remove_pending_req(struct fuse_req *req);
+bool fuse_uring_request_expired(struct fuse_conn *fc);
static inline void fuse_uring_abort(struct fuse_conn *fc)
{
@@ -172,12 +174,6 @@ static inline bool fuse_uring_ready(struct fuse_conn *fc)
#else /* CONFIG_FUSE_IO_URING */
-struct fuse_ring;
-
-static inline void fuse_uring_create(struct fuse_conn *fc)
-{
-}
-
static inline void fuse_uring_destruct(struct fuse_conn *fc)
{
}
@@ -200,6 +196,16 @@ static inline bool fuse_uring_ready(struct fuse_conn *fc)
return false;
}
+static inline bool fuse_uring_remove_pending_req(struct fuse_req *req)
+{
+ return false;
+}
+
+static inline bool fuse_uring_request_expired(struct fuse_conn *fc)
+{
+ return false;
+}
+
#endif /* CONFIG_FUSE_IO_URING */
#endif /* _FS_FUSE_DEV_URING_I_H */
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 85e4f894a59f..83ac192e7fdd 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -370,7 +370,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
*inode = NULL;
err = -ENAMETOOLONG;
- if (name->len > FUSE_NAME_MAX)
+ if (name->len > fm->fc->name_max)
goto out;
@@ -1137,6 +1137,9 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
struct fuse_mount *fm = get_fuse_mount(inode);
FUSE_ARGS(args);
+ if (fm->fc->no_link)
+ goto out;
+
memset(&inarg, 0, sizeof(inarg));
inarg.oldnodeid = get_node_id(inode);
args.opcode = FUSE_LINK;
@@ -1151,6 +1154,12 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
else if (err == -EINTR)
fuse_invalidate_attr(inode);
+ if (err == -ENOSYS)
+ fm->fc->no_link = 1;
+out:
+ if (fm->fc->no_link)
+ return -EPERM;
+
return err;
}
diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h
index 3b2bfe1248d3..b3c2e32254ba 100644
--- a/fs/fuse/fuse_dev_i.h
+++ b/fs/fuse/fuse_dev_i.h
@@ -61,6 +61,10 @@ int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
struct fuse_forget_link *forget);
void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock);
+
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list);
+bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing);
#endif
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index fee96fe7887b..d56d4fd956db 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -38,14 +38,34 @@
/** Bias for fi->writectr, meaning new writepages must not be sent */
#define FUSE_NOWRITE INT_MIN
-/** It could be as large as PATH_MAX, but would that have any uses? */
-#define FUSE_NAME_MAX 1024
+/** Maximum length of a filename, not including terminating null */
+
+/* maximum, small enough for FUSE_MIN_READ_BUFFER */
+#define FUSE_NAME_LOW_MAX 1024
+/* maximum, but needs a request buffer > FUSE_MIN_READ_BUFFER */
+#define FUSE_NAME_MAX (PATH_MAX - 1)
/** Number of dentries for each connection in the control filesystem */
#define FUSE_CTL_NUM_DENTRIES 5
+/* Frequency (in seconds) of request timeout checks, if opted into */
+#define FUSE_TIMEOUT_TIMER_FREQ 15
+
+/** Frequency (in jiffies) of request timeout checks, if opted into */
+extern const unsigned long fuse_timeout_timer_freq;
+
/** Maximum of max_pages received in init_out */
extern unsigned int fuse_max_pages_limit;
+/*
+ * Default timeout (in seconds) for the server to reply to a request
+ * before the connection is aborted, if no timeout was specified on mount.
+ */
+extern unsigned int fuse_default_req_timeout;
+/*
+ * Max timeout (in seconds) for the server to reply to a request before
+ * the connection is aborted.
+ */
+extern unsigned int fuse_max_req_timeout;
/** List of active connections */
extern struct list_head fuse_conn_list;
@@ -378,6 +398,7 @@ struct fuse_io_priv {
* FR_FINISHED: request is finished
* FR_PRIVATE: request is on private list
* FR_ASYNC: request is asynchronous
+ * FR_URING: request is handled through fuse-io-uring
*/
enum fuse_req_flag {
FR_ISREPLY,
@@ -392,6 +413,7 @@ enum fuse_req_flag {
FR_FINISHED,
FR_PRIVATE,
FR_ASYNC,
+ FR_URING,
};
/**
@@ -441,7 +463,10 @@ struct fuse_req {
#ifdef CONFIG_FUSE_IO_URING
void *ring_entry;
+ void *ring_queue;
#endif
+ /** When (in jiffies) the request was created */
+ unsigned long create_time;
};
struct fuse_iqueue;
@@ -867,6 +892,9 @@ struct fuse_conn {
/* Use pages instead of pointer for kernel I/O */
unsigned int use_pages_for_kvec_io:1;
+ /* Is link not implemented by fs? */
+ unsigned int no_link:1;
+
/* Use io_uring for communication */
unsigned int io_uring;
@@ -900,6 +928,9 @@ struct fuse_conn {
/** Version counter for evict inode */
atomic64_t evict_ctr;
+ /* maximum file name length */
+ u32 name_max;
+
/** Called on final put */
void (*release)(struct fuse_conn *);
@@ -935,6 +966,15 @@ struct fuse_conn {
/** uring connection information*/
struct fuse_ring *ring;
#endif
+
+ /** Only used if the connection opts into request timeouts */
+ struct {
+ /* Worker for checking if any requests have timed out */
+ struct delayed_work work;
+
+ /* Request timeout (in jiffies). 0 = no timeout */
+ unsigned int req_timeout;
+ } timeout;
};
/*
@@ -1216,6 +1256,9 @@ void fuse_request_end(struct fuse_req *req);
void fuse_abort_conn(struct fuse_conn *fc);
void fuse_wait_aborted(struct fuse_conn *fc);
+/* Check if any requests timed out */
+void fuse_check_timeout(struct work_struct *work);
+
/**
* Invalidate inode attributes
*/
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e9db2cb8c150..fd48e8d37f2e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -37,6 +37,9 @@ DEFINE_MUTEX(fuse_mutex);
static int set_global_limit(const char *val, const struct kernel_param *kp);
unsigned int fuse_max_pages_limit = 256;
+/* default is no timeout */
+unsigned int fuse_default_req_timeout;
+unsigned int fuse_max_req_timeout;
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
@@ -979,6 +982,8 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
fc->user_ns = get_user_ns(user_ns);
fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
fc->max_pages_limit = fuse_max_pages_limit;
+ fc->name_max = FUSE_NAME_LOW_MAX;
+ fc->timeout.req_timeout = 0;
if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
fuse_backing_files_init(fc);
@@ -1007,6 +1012,8 @@ void fuse_conn_put(struct fuse_conn *fc)
if (IS_ENABLED(CONFIG_FUSE_DAX))
fuse_dax_conn_free(fc);
+ if (fc->timeout.req_timeout)
+ cancel_delayed_work_sync(&fc->timeout.work);
if (fiq->ops->release)
fiq->ops->release(fiq);
put_pid_ns(fc->pid_ns);
@@ -1257,6 +1264,34 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
spin_unlock(&fc->bg_lock);
}
+static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout)
+{
+ fc->timeout.req_timeout = secs_to_jiffies(timeout);
+ INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
+ queue_delayed_work(system_wq, &fc->timeout.work,
+ fuse_timeout_timer_freq);
+}
+
+static void init_server_timeout(struct fuse_conn *fc, unsigned int timeout)
+{
+ if (!timeout && !fuse_max_req_timeout && !fuse_default_req_timeout)
+ return;
+
+ if (!timeout)
+ timeout = fuse_default_req_timeout;
+
+ if (fuse_max_req_timeout) {
+ if (timeout)
+ timeout = min(fuse_max_req_timeout, timeout);
+ else
+ timeout = fuse_max_req_timeout;
+ }
+
+ timeout = max(FUSE_TIMEOUT_TIMER_FREQ, timeout);
+
+ set_request_timeout(fc, timeout);
+}
+
struct fuse_init_args {
struct fuse_args args;
struct fuse_init_in in;
@@ -1275,6 +1310,7 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
ok = false;
else {
unsigned long ra_pages;
+ unsigned int timeout = 0;
process_init_limits(fc, arg);
@@ -1338,6 +1374,13 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
fc->max_pages =
min_t(unsigned int, fc->max_pages_limit,
max_t(unsigned int, arg->max_pages, 1));
+
+ /*
+ * PATH_MAX file names might need two pages for
+ * ops like rename
+ */
+ if (fc->max_pages > 1)
+ fc->name_max = FUSE_NAME_MAX;
}
if (IS_ENABLED(CONFIG_FUSE_DAX)) {
if (flags & FUSE_MAP_ALIGNMENT &&
@@ -1392,12 +1435,17 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
}
if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled())
fc->io_uring = 1;
+
+ if (flags & FUSE_REQUEST_TIMEOUT)
+ timeout = arg->request_timeout;
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
fc->no_flock = 1;
}
+ init_server_timeout(fc, timeout);
+
fm->sb->s_bdi->ra_pages =
min(fm->sb->s_bdi->ra_pages, ra_pages);
fc->minor = arg->minor;
@@ -1439,7 +1487,8 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
- FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP;
+ FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP |
+ FUSE_REQUEST_TIMEOUT;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
flags |= FUSE_MAP_ALIGNMENT;
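To make the clamping in init_server_timeout() above easier to follow, here is an illustrative pure-function restatement (effective_req_timeout() is hypothetical; all values are in seconds). For example, with default_request_timeout = 0 and max_request_timeout = 600, a server requesting 1000 seconds is capped at 600, a server requesting nothing still gets 600, and a request for 5 seconds is raised to the 15-second check frequency.

/* Illustrative only; mirrors init_server_timeout()'s clamping. */
static unsigned int effective_req_timeout(unsigned int requested,
					  unsigned int def, unsigned int max_to)
{
	unsigned int timeout = requested;

	if (!timeout && !max_to && !def)
		return 0;			/* timeouts stay disabled */

	if (!timeout)
		timeout = def;

	if (max_to)
		timeout = timeout ? min(max_to, timeout) : max_to;

	/* never lower than the timer frequency, or expiry could be missed */
	return max_t(unsigned int, FUSE_TIMEOUT_TIMER_FREQ, timeout);
}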
diff --git a/fs/fuse/sysctl.c b/fs/fuse/sysctl.c
index 63fb1e5bee30..e2d921abcb88 100644
--- a/fs/fuse/sysctl.c
+++ b/fs/fuse/sysctl.c
@@ -13,6 +13,12 @@ static struct ctl_table_header *fuse_table_header;
/* Bound by fuse_init_out max_pages, which is a u16 */
static unsigned int sysctl_fuse_max_pages_limit = 65535;
+/*
+ * fuse_init_out request timeouts are u16 values in seconds.
+ * That allows up to ~18 hours, which is ample for a request timeout.
+ */
+static unsigned int sysctl_fuse_req_timeout_limit = 65535;
+
static const struct ctl_table fuse_sysctl_table[] = {
{
.procname = "max_pages_limit",
@@ -23,6 +29,24 @@ static const struct ctl_table fuse_sysctl_table[] = {
.extra1 = SYSCTL_ONE,
.extra2 = &sysctl_fuse_max_pages_limit,
},
+ {
+ .procname = "default_request_timeout",
+ .data = &fuse_default_req_timeout,
+ .maxlen = sizeof(fuse_default_req_timeout),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sysctl_fuse_req_timeout_limit,
+ },
+ {
+ .procname = "max_request_timeout",
+ .data = &fuse_max_req_timeout,
+ .maxlen = sizeof(fuse_max_req_timeout),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sysctl_fuse_req_timeout_limit,
+ },
};
int fuse_sysctl_register(void)
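For context, a minimal userspace sketch of how the effective request timeout is derived from the FUSE_INIT-advertised value and the two sysctls above, mirroring the init_server_timeout() logic; CHECK_PERIOD_SECS and the sample values are illustrative assumptions, not the kernel's constants.

/* Hedged sketch (not kernel code) of the timeout derivation. */
#include <stdio.h>

#define CHECK_PERIOD_SECS 15u	/* assumed granularity of the timeout checker */

static unsigned int effective_timeout(unsigned int init_timeout,
				      unsigned int sysctl_default,
				      unsigned int sysctl_max)
{
	unsigned int timeout = init_timeout;

	/* No timeout requested anywhere: requests never time out. */
	if (!timeout && !sysctl_max && !sysctl_default)
		return 0;

	if (!timeout)
		timeout = sysctl_default;

	/* max_request_timeout caps (or supplies) the value. */
	if (sysctl_max)
		timeout = timeout ? (timeout < sysctl_max ? timeout : sysctl_max)
				  : sysctl_max;

	/* Never time out faster than the checker actually runs. */
	if (timeout < CHECK_PERIOD_SECS)
		timeout = CHECK_PERIOD_SECS;
	return timeout;
}

int main(void)
{
	/* Server asked for 10s, no default, max 600s -> raised to the 15s floor. */
	printf("%u\n", effective_timeout(10, 0, 600));
	/* Server asked for nothing, default 300s, max 120s -> capped to 120s. */
	printf("%u\n", effective_timeout(0, 300, 120));
	return 0;
}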
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 8b39c15c408c..15b2f094d36e 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -60,7 +60,7 @@ struct hostfs_stat {
unsigned int uid;
unsigned int gid;
unsigned long long size;
- struct hostfs_timespec atime, mtime, ctime;
+ struct hostfs_timespec atime, mtime, ctime, btime;
unsigned int blksize;
unsigned long long blocks;
struct {
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index a2c6b9051c5b..702c41317589 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -33,6 +33,7 @@ struct hostfs_inode_info {
struct inode vfs_inode;
struct mutex open_mutex;
dev_t dev;
+ struct hostfs_timespec btime;
};
static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
@@ -547,6 +548,7 @@ static int hostfs_inode_set(struct inode *ino, void *data)
}
HOSTFS_I(ino)->dev = dev;
+ HOSTFS_I(ino)->btime = st->btime;
ino->i_ino = st->ino;
ino->i_mode = st->mode;
return hostfs_inode_update(ino, st);
@@ -557,7 +559,10 @@ static int hostfs_inode_test(struct inode *inode, void *data)
const struct hostfs_stat *st = data;
dev_t dev = MKDEV(st->dev.maj, st->dev.min);
- return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev;
+ return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev &&
+ (inode->i_mode & S_IFMT) == (st->mode & S_IFMT) &&
+ HOSTFS_I(inode)->btime.tv_sec == st->btime.tv_sec &&
+ HOSTFS_I(inode)->btime.tv_nsec == st->btime.tv_nsec;
}
static struct inode *hostfs_iget(struct super_block *sb, char *name)
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 97e9c40a9448..3bcd9f35e70b 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -18,39 +18,48 @@
#include "hostfs.h"
#include <utime.h>
-static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
+static void statx_to_hostfs(const struct statx *buf, struct hostfs_stat *p)
{
- p->ino = buf->st_ino;
- p->mode = buf->st_mode;
- p->nlink = buf->st_nlink;
- p->uid = buf->st_uid;
- p->gid = buf->st_gid;
- p->size = buf->st_size;
- p->atime.tv_sec = buf->st_atime;
- p->atime.tv_nsec = 0;
- p->ctime.tv_sec = buf->st_ctime;
- p->ctime.tv_nsec = 0;
- p->mtime.tv_sec = buf->st_mtime;
- p->mtime.tv_nsec = 0;
- p->blksize = buf->st_blksize;
- p->blocks = buf->st_blocks;
- p->rdev.maj = os_major(buf->st_rdev);
- p->rdev.min = os_minor(buf->st_rdev);
- p->dev.maj = os_major(buf->st_dev);
- p->dev.min = os_minor(buf->st_dev);
+ p->ino = buf->stx_ino;
+ p->mode = buf->stx_mode;
+ p->nlink = buf->stx_nlink;
+ p->uid = buf->stx_uid;
+ p->gid = buf->stx_gid;
+ p->size = buf->stx_size;
+ p->atime.tv_sec = buf->stx_atime.tv_sec;
+ p->atime.tv_nsec = buf->stx_atime.tv_nsec;
+ p->ctime.tv_sec = buf->stx_ctime.tv_sec;
+ p->ctime.tv_nsec = buf->stx_ctime.tv_nsec;
+ p->mtime.tv_sec = buf->stx_mtime.tv_sec;
+ p->mtime.tv_nsec = buf->stx_mtime.tv_nsec;
+ if (buf->stx_mask & STATX_BTIME) {
+ p->btime.tv_sec = buf->stx_btime.tv_sec;
+ p->btime.tv_nsec = buf->stx_btime.tv_nsec;
+ } else {
+ memset(&p->btime, 0, sizeof(p->btime));
+ }
+ p->blksize = buf->stx_blksize;
+ p->blocks = buf->stx_blocks;
+ p->rdev.maj = buf->stx_rdev_major;
+ p->rdev.min = buf->stx_rdev_minor;
+ p->dev.maj = buf->stx_dev_major;
+ p->dev.min = buf->stx_dev_minor;
}
int stat_file(const char *path, struct hostfs_stat *p, int fd)
{
- struct stat64 buf;
+ struct statx buf;
+ int flags = AT_SYMLINK_NOFOLLOW;
if (fd >= 0) {
- if (fstat64(fd, &buf) < 0)
- return -errno;
- } else if (lstat64(path, &buf) < 0) {
- return -errno;
+ flags |= AT_EMPTY_PATH;
+ path = "";
}
- stat64_to_hostfs(&buf, p);
+
+ if ((statx(fd, path, flags, STATX_BASIC_STATS | STATX_BTIME, &buf)) < 0)
+ return -errno;
+
+ statx_to_hostfs(&buf, p);
return 0;
}
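The hostfs conversion above replaces fstat64()/lstat64() with a single statx() call so the host file's birth time can be captured when the host filesystem reports it. A hedged userspace sketch of the same pattern, including the STATX_BTIME fallback, assuming only the standard glibc statx() wrapper:

/* Hedged userspace sketch of the statx() usage shown above. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

static int print_btime(int fd, const char *path)
{
	struct statx stx;
	int flags = AT_SYMLINK_NOFOLLOW;
	int dirfd = AT_FDCWD;

	if (fd >= 0) {		/* stat an already-open file by descriptor */
		dirfd = fd;
		flags |= AT_EMPTY_PATH;
		path = "";
	}
	if (statx(dirfd, path, flags, STATX_BASIC_STATS | STATX_BTIME, &stx) < 0)
		return -errno;

	if (stx.stx_mask & STATX_BTIME)
		printf("btime: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec, stx.stx_btime.tv_nsec);
	else
		printf("btime not reported; treating it as zero\n");
	return 0;
}

int main(int argc, char **argv)
{
	return argc > 1 ? -print_btime(-1, argv[1]) : 0;
}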
diff --git a/fs/namespace.c b/fs/namespace.c
index 6100e5b962a6..14935a0500a2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2478,7 +2478,8 @@ struct vfsmount *clone_private_mount(const struct path *path)
struct mount *old_mnt = real_mount(path->mnt);
struct mount *new_mnt;
- scoped_guard(rwsem_read, &namespace_sem)
+ guard(rwsem_read)(&namespace_sem);
+
if (IS_MNT_UNBINDABLE(old_mnt))
return ERR_PTR(-EINVAL);
@@ -5326,8 +5327,10 @@ struct kstatmount {
struct mnt_idmap *idmap;
u64 mask;
struct path root;
- struct statmount sm;
struct seq_file seq;
+
+	/* Must be last -- ends in a flexible-array member. */
+ struct statmount sm;
};
static u64 mnt_to_attr_flags(struct vfsmount *mnt)
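Moving the statmount member to the end matters because that struct ends in a flexible array: bytes written through the array land exactly where any members placed after it would sit. A hedged toy illustration (the types are invented here, and embedding a flexible-array struct at all is a GCC/Clang extension, not standard C):

/* Hedged illustration of why a flexible-array-terminated member must be last. */
#include <stddef.h>
#include <stdio.h>

struct tail {
	size_t len;
	char buf[];		/* flexible array member */
};

struct container {
	int id;
	struct tail t;		/* must be last: buf[] grows past the struct */
};

int main(void)
{
	printf("sizeof(struct container) = %zu\n", sizeof(struct container));
	printf("buf[] starts at offset %zu, i.e. at the end of the struct\n",
	       offsetof(struct container, t.buf));
	return 0;
}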
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 3b0918ade53c..02c916a55020 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -546,6 +546,8 @@ int nfs_create_rpc_client(struct nfs_client *clp,
args.flags |= RPC_CLNT_CREATE_NOPING;
if (test_bit(NFS_CS_REUSEPORT, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_REUSEPORT;
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ args.flags |= RPC_CLNT_CREATE_NETUNREACH_FATAL;
if (!IS_ERR(clp->cl_rpcclient))
return 0;
@@ -709,6 +711,9 @@ static int nfs_init_server(struct nfs_server *server,
if (ctx->flags & NFS_MOUNT_NORESVPORT)
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (ctx->flags & NFS_MOUNT_NETUNREACH_FATAL)
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
+
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
if (IS_ERR(clp))
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 4db912f56230..8bdbc4dca89c 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -79,6 +79,7 @@ static void nfs_mark_return_delegation(struct nfs_server *server,
struct nfs_delegation *delegation)
{
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+ set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}
@@ -306,7 +307,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
if (delegation == NULL)
goto out;
spin_lock(&delegation->lock);
- if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ if (delegation->inode &&
+ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);
@@ -330,14 +332,16 @@ nfs_start_delegation_return(struct nfs_inode *nfsi)
}
static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
- struct nfs_client *clp, int err)
+ struct nfs_server *server, int err)
{
-
spin_lock(&delegation->lock);
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
if (err == -EAGAIN) {
set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state);
+ set_bit(NFS4SERV_DELEGRETURN_DELAYED,
+ &server->delegation_flags);
+ set_bit(NFS4CLNT_DELEGRETURN_DELAYED,
+ &server->nfs_client->cl_state);
}
spin_unlock(&delegation->lock);
}
@@ -547,7 +551,7 @@ out:
*/
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
- struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ struct nfs_server *server = NFS_SERVER(inode);
unsigned int mode = O_WRONLY | O_RDWR;
int err = 0;
@@ -569,11 +573,11 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
/*
* Guard against state recovery
*/
- err = nfs4_wait_clnt_recover(clp);
+ err = nfs4_wait_clnt_recover(server->nfs_client);
}
if (err) {
- nfs_abort_delegation_return(delegation, clp, err);
+ nfs_abort_delegation_return(delegation, server, err);
goto out;
}
@@ -590,17 +594,6 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
ret = true;
- else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
- struct inode *inode;
-
- spin_lock(&delegation->lock);
- inode = delegation->inode;
- if (inode && list_empty(&NFS_I(inode)->open_files))
- ret = true;
- spin_unlock(&delegation->lock);
- }
- if (ret)
- clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
@@ -619,6 +612,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
struct nfs_delegation *place_holder_deleg = NULL;
int err = 0;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN,
+ &server->delegation_flags))
+ return 0;
restart:
/*
* To avoid quadratic looping we hold a reference
@@ -670,6 +666,7 @@ restart:
cond_resched();
if (!err)
goto restart;
+ set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
goto out;
}
@@ -684,6 +681,9 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
struct nfs_delegation *d;
bool ret = false;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED,
+ &server->delegation_flags))
+ goto out;
list_for_each_entry_rcu (d, &server->delegations, super_list) {
if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
continue;
@@ -691,6 +691,7 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
ret = true;
}
+out:
return ret;
}
@@ -878,11 +879,25 @@ int nfs4_inode_make_writeable(struct inode *inode)
return nfs4_inode_return_delegation(inode);
}
-static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
- struct nfs_delegation *delegation)
+static void
+nfs_mark_return_if_closed_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation)
{
- set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+ struct inode *inode;
+
+ if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags))
+ return;
+ spin_lock(&delegation->lock);
+ inode = delegation->inode;
+ if (!inode)
+ goto out;
+ if (list_empty(&NFS_I(inode)->open_files))
+ nfs_mark_return_delegation(server, delegation);
+ else
+ set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+out:
+ spin_unlock(&delegation->lock);
}
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
@@ -1276,6 +1291,7 @@ static void nfs_mark_test_expired_delegation(struct nfs_server *server,
return;
clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
+ set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state);
}
@@ -1354,6 +1370,9 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
nfs4_stateid stateid;
unsigned long gen = ++server->delegation_gen;
+ if (!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED,
+ &server->delegation_flags))
+ return 0;
restart:
rcu_read_lock();
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
@@ -1383,6 +1402,9 @@ restart:
goto restart;
}
nfs_inode_mark_test_expired_delegation(server,inode);
+ set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
+ set_bit(NFS4CLNT_DELEGATION_EXPIRED,
+ &server->nfs_client->cl_state);
iput(inode);
return -EAGAIN;
}
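The delegation changes above gate each scan on a per-server pending bit, so servers with nothing marked are skipped instead of being walked on every state-manager pass. A hedged userspace model of that test-and-clear/rearm pattern, using C11 atomics in place of the kernel's bitops:

/* Hedged model of the per-server "work pending" bit added above. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SERV_DELEGRETURN (1u << 0)

static atomic_uint server_flags = 0;

static bool test_and_clear(atomic_uint *flags, unsigned int bit)
{
	return atomic_fetch_and(flags, ~bit) & bit;
}

static void mark_return_needed(void)
{
	atomic_fetch_or(&server_flags, SERV_DELEGRETURN);
}

static int return_marked_delegations(bool scan_completes)
{
	if (!test_and_clear(&server_flags, SERV_DELEGRETURN))
		return 0;			/* nothing pending: skip the scan */

	printf("scanning delegations...\n");
	if (!scan_completes) {
		mark_return_needed();		/* leftover work: rearm the bit */
		return -1;
	}
	return 0;
}

int main(void)
{
	mark_return_needed();
	return_marked_delegations(false);	/* scans, then rearms */
	return_marked_delegations(true);	/* scans the leftovers */
	return_marked_delegations(true);	/* skipped: bit is clear */
	return 0;
}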
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index bc957487f6ec..bd23fc736b39 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -666,6 +666,8 @@ static bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx,
{
if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
return false;
+ if (NFS_SERVER(dir)->flags & NFS_MOUNT_FORCE_RDIRPLUS)
+ return true;
if (ctx->pos == 0 ||
cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD)
return true;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 98b45b636be3..61ad269c825f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1154,10 +1154,14 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
rpc_wake_up(&tbl->slot_tbl_waitq);
goto reset;
/* RPC connection errors */
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ return -NFS4ERR_FATAL_IOERROR;
+ fallthrough;
case -ECONNREFUSED:
case -EHOSTDOWN:
case -EHOSTUNREACH:
- case -ENETUNREACH:
case -EIO:
case -ETIMEDOUT:
case -EPIPE:
@@ -1183,6 +1187,7 @@ reset:
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ struct nfs_client *clp,
struct pnfs_layout_segment *lseg,
u32 idx)
{
@@ -1200,6 +1205,11 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
case -EJUKEBOX:
nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
goto out_retry;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ return -NFS4ERR_FATAL_IOERROR;
+ fallthrough;
default:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
@@ -1234,7 +1244,7 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
switch (vers) {
case 3:
- return ff_layout_async_handle_error_v3(task, lseg, idx);
+ return ff_layout_async_handle_error_v3(task, clp, lseg, idx);
case 4:
return ff_layout_async_handle_error_v4(task, state, clp,
lseg, idx);
@@ -1264,6 +1274,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
case -ECONNRESET:
case -EHOSTDOWN:
case -EHOSTUNREACH:
+ case -ENETDOWN:
case -ENETUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
@@ -1337,6 +1348,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
return task->tk_status;
case -EAGAIN:
goto out_eagain;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
return 0;
@@ -1507,6 +1521,9 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
return task->tk_status;
case -EAGAIN:
return -EAGAIN;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
if (hdr->res.verf->committed == NFS_FILE_SYNC ||
@@ -1551,6 +1568,9 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
case -EAGAIN:
rpc_restart_call_prepare(task);
return -EAGAIN;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index b069385eea17..13f71ca8c974 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -50,6 +50,7 @@ enum nfs_param {
Opt_clientaddr,
Opt_cto,
Opt_alignwrite,
+ Opt_fatal_neterrors,
Opt_fg,
Opt_fscache,
Opt_fscache_flag,
@@ -72,6 +73,8 @@ enum nfs_param {
Opt_posix,
Opt_proto,
Opt_rdirplus,
+ Opt_rdirplus_none,
+ Opt_rdirplus_force,
Opt_rdma,
Opt_resvport,
Opt_retrans,
@@ -96,6 +99,20 @@ enum nfs_param {
};
enum {
+ Opt_fatal_neterrors_default,
+ Opt_fatal_neterrors_enetunreach,
+ Opt_fatal_neterrors_none,
+};
+
+static const struct constant_table nfs_param_enums_fatal_neterrors[] = {
+ { "default", Opt_fatal_neterrors_default },
+ { "ENETDOWN:ENETUNREACH", Opt_fatal_neterrors_enetunreach },
+ { "ENETUNREACH:ENETDOWN", Opt_fatal_neterrors_enetunreach },
+ { "none", Opt_fatal_neterrors_none },
+ {}
+};
+
+enum {
Opt_local_lock_all,
Opt_local_lock_flock,
Opt_local_lock_none,
@@ -151,6 +168,8 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_string("clientaddr", Opt_clientaddr),
fsparam_flag_no("cto", Opt_cto),
fsparam_flag_no("alignwrite", Opt_alignwrite),
+ fsparam_enum("fatal_neterrors", Opt_fatal_neterrors,
+ nfs_param_enums_fatal_neterrors),
fsparam_flag ("fg", Opt_fg),
fsparam_flag_no("fsc", Opt_fscache_flag),
fsparam_string("fsc", Opt_fscache),
@@ -174,7 +193,8 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_u32 ("port", Opt_port),
fsparam_flag_no("posix", Opt_posix),
fsparam_string("proto", Opt_proto),
- fsparam_flag_no("rdirplus", Opt_rdirplus),
+ fsparam_flag_no("rdirplus", Opt_rdirplus), // rdirplus|nordirplus
+ fsparam_string("rdirplus", Opt_rdirplus), // rdirplus=...
fsparam_flag ("rdma", Opt_rdma),
fsparam_flag_no("resvport", Opt_resvport),
fsparam_u32 ("retrans", Opt_retrans),
@@ -288,6 +308,12 @@ static const struct constant_table nfs_xprtsec_policies[] = {
{}
};
+static const struct constant_table nfs_rdirplus_tokens[] = {
+ { "none", Opt_rdirplus_none },
+ { "force", Opt_rdirplus_force },
+ {}
+};
+
/*
* Sanity-check a server address provided by the mount command.
*
@@ -636,10 +662,25 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->flags &= ~NFS_MOUNT_NOACL;
break;
case Opt_rdirplus:
- if (result.negated)
+ if (result.negated) {
+ ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
ctx->flags |= NFS_MOUNT_NORDIRPLUS;
- else
- ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ } else if (!param->string) {
+ ctx->flags &= ~(NFS_MOUNT_NORDIRPLUS | NFS_MOUNT_FORCE_RDIRPLUS);
+ } else {
+ switch (lookup_constant(nfs_rdirplus_tokens, param->string, -1)) {
+ case Opt_rdirplus_none:
+ ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
+ ctx->flags |= NFS_MOUNT_NORDIRPLUS;
+ break;
+ case Opt_rdirplus_force:
+ ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ ctx->flags |= NFS_MOUNT_FORCE_RDIRPLUS;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ }
break;
case Opt_sharecache:
if (result.negated)
@@ -872,6 +913,25 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
goto out_of_bounds;
ctx->nfs_server.max_connect = result.uint_32;
break;
+ case Opt_fatal_neterrors:
+ trace_nfs_mount_assign(param->key, param->string);
+ switch (result.uint_32) {
+ case Opt_fatal_neterrors_default:
+ if (fc->net_ns != &init_net)
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+ else
+ ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ case Opt_fatal_neterrors_enetunreach:
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ case Opt_fatal_neterrors_none:
+ ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ break;
case Opt_lookupcache:
trace_nfs_mount_assign(param->key, param->string);
switch (result.uint_32) {
@@ -1651,6 +1711,9 @@ static int nfs_init_fs_context(struct fs_context *fc)
ctx->xprtsec.cert_serial = TLS_NO_CERT;
ctx->xprtsec.privkey_serial = TLS_NO_PRIVKEY;
+ if (fc->net_ns != &init_net)
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+
fc->s_iflags |= SB_I_STABLE_WRITES;
}
fc->fs_private = ctx;
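With the duplicate "rdirplus" parameter spec above, the option now has three positive spellings besides nordirplus. A hedged plain-C model of the flag updates each form performs (the flag names mirror the ones above; the fs_parameter machinery is simplified away):

/* Hedged model of the rdirplus / nordirplus / rdirplus=none|force parsing. */
#include <stdio.h>
#include <string.h>

#define NFS_MOUNT_NORDIRPLUS     (1u << 0)
#define NFS_MOUNT_FORCE_RDIRPLUS (1u << 1)

/* Returns -1 for an unknown value, mirroring "goto out_invalid_value". */
static int apply_rdirplus(unsigned int *flags, const char *value, int negated)
{
	if (negated) {					/* nordirplus */
		*flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
		*flags |= NFS_MOUNT_NORDIRPLUS;
	} else if (!value) {				/* bare rdirplus */
		*flags &= ~(NFS_MOUNT_NORDIRPLUS | NFS_MOUNT_FORCE_RDIRPLUS);
	} else if (!strcmp(value, "none")) {		/* rdirplus=none */
		*flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
		*flags |= NFS_MOUNT_NORDIRPLUS;
	} else if (!strcmp(value, "force")) {		/* rdirplus=force */
		*flags &= ~NFS_MOUNT_NORDIRPLUS;
		*flags |= NFS_MOUNT_FORCE_RDIRPLUS;
	} else {
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int flags = 0;

	apply_rdirplus(&flags, "force", 0);
	printf("rdirplus=force -> flags 0x%x\n", flags);
	apply_rdirplus(&flags, NULL, 1);
	printf("nordirplus     -> flags 0x%x\n", flags);
	return 0;
}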
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 1aa67fca69b2..119e447758b9 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -74,6 +74,8 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
{
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
schedule();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 1ac1d3eec517..ec8d32d0e2e9 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -912,6 +912,11 @@ static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
}
#endif
+static inline bool nfs_current_task_exiting(void)
+{
+ return (current->flags & PF_EXITING) != 0;
+}
+
static inline bool nfs_error_is_fatal(int err)
{
switch (err) {
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index b0c8a39c2bbd..0d7310c1ee0c 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -120,6 +120,8 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
__set_bit(NFS_CS_DS, &cl_init.init_flags);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 755ed3c37051..a4cb67573aa7 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -39,7 +39,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
- } while (!fatal_signal_pending(current));
+ } while (!fatal_signal_pending(current) && !nfs_current_task_exiting());
return res;
}
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 1924c4a2077b..5cf52ece96ac 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -21,6 +21,8 @@
#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
+static int nfs42_proc_offload_status(struct file *file, nfs4_stateid *stateid,
+ u64 *copied);
static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
@@ -173,6 +175,20 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
return err;
}
+static void nfs4_copy_dequeue_callback(struct nfs_server *dst_server,
+ struct nfs_server *src_server,
+ struct nfs4_copy_state *copy)
+{
+ spin_lock(&dst_server->nfs_client->cl_lock);
+ list_del_init(&copy->copies);
+ spin_unlock(&dst_server->nfs_client->cl_lock);
+ if (dst_server != src_server) {
+ spin_lock(&src_server->nfs_client->cl_lock);
+ list_del_init(&copy->src_copies);
+ spin_unlock(&src_server->nfs_client->cl_lock);
+ }
+}
+
static int handle_async_copy(struct nfs42_copy_res *res,
struct nfs_server *dst_server,
struct nfs_server *src_server,
@@ -182,9 +198,12 @@ static int handle_async_copy(struct nfs42_copy_res *res,
bool *restart)
{
struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
- int status = NFS4_OK;
struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
struct nfs_open_context *src_ctx = nfs_file_open_context(src);
+ struct nfs_client *clp = dst_server->nfs_client;
+ unsigned long timeout = 3 * HZ;
+ int status = NFS4_OK;
+ u64 copied;
copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
if (!copy)
@@ -222,15 +241,12 @@ static int handle_async_copy(struct nfs42_copy_res *res,
spin_unlock(&src_server->nfs_client->cl_lock);
}
- status = wait_for_completion_interruptible(&copy->completion);
- spin_lock(&dst_server->nfs_client->cl_lock);
- list_del_init(&copy->copies);
- spin_unlock(&dst_server->nfs_client->cl_lock);
- if (dst_server != src_server) {
- spin_lock(&src_server->nfs_client->cl_lock);
- list_del_init(&copy->src_copies);
- spin_unlock(&src_server->nfs_client->cl_lock);
- }
+wait:
+ status = wait_for_completion_interruptible_timeout(&copy->completion,
+ timeout);
+ if (!status)
+ goto timeout;
+ nfs4_copy_dequeue_callback(dst_server, src_server, copy);
if (status == -ERESTARTSYS) {
goto out_cancel;
} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
@@ -240,6 +256,7 @@ static int handle_async_copy(struct nfs42_copy_res *res,
}
out:
res->write_res.count = copy->count;
+ /* Copy out the updated write verifier provided by CB_OFFLOAD. */
memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
status = -copy->error;
@@ -251,6 +268,39 @@ out_cancel:
if (!nfs42_files_from_same_server(src, dst))
nfs42_do_offload_cancel_async(src, src_stateid);
goto out_free;
+timeout:
+ timeout <<= 1;
+ if (timeout > (clp->cl_lease_time >> 1))
+ timeout = clp->cl_lease_time >> 1;
+ status = nfs42_proc_offload_status(dst, &copy->stateid, &copied);
+ if (status == -EINPROGRESS)
+ goto wait;
+ nfs4_copy_dequeue_callback(dst_server, src_server, copy);
+ switch (status) {
+ case 0:
+ /* The server recognized the copy stateid, so it hasn't
+ * rebooted. Don't overwrite the verifier returned in the
+ * COPY result. */
+ res->write_res.count = copied;
+ goto out_free;
+ case -EREMOTEIO:
+ /* COPY operation failed on the server. */
+ status = -EOPNOTSUPP;
+ res->write_res.count = copied;
+ goto out_free;
+ case -EBADF:
+		/* Server did not recognize the copy stateid. It has
+		 * probably restarted and lost track of the copy. */
+ res->write_res.count = 0;
+ status = -EOPNOTSUPP;
+ break;
+ case -EOPNOTSUPP:
+		/* RFC 7862 REQUIREs the server to support OFFLOAD_STATUS
+		 * once it has accepted an async COPY, so this server is
+		 * not spec-compliant. */
+ res->write_res.count = 0;
+ }
+ goto out_free;
}
static int process_copy_commit(struct file *dst, loff_t pos_dst,
@@ -582,6 +632,108 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
return status;
}
+static int
+_nfs42_proc_offload_status(struct nfs_server *server, struct file *file,
+ struct nfs42_offload_data *data)
+{
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_STATUS],
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = ctx->cred,
+ };
+ int status;
+
+ status = nfs4_call_sync(server->client, server, &msg,
+ &data->args.osa_seq_args,
+ &data->res.osr_seq_res, 1);
+ trace_nfs4_offload_status(&data->args, status);
+ switch (status) {
+ case 0:
+ break;
+
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ /*
+ * Server does not recognize the COPY stateid. CB_OFFLOAD
+ * could have purged it, or server might have rebooted.
+ * Since COPY stateids don't have an associated inode,
+ * avoid triggering state recovery.
+ */
+ status = -EBADF;
+ break;
+ case -NFS4ERR_NOTSUPP:
+ case -ENOTSUPP:
+ case -EOPNOTSUPP:
+ server->caps &= ~NFS_CAP_OFFLOAD_STATUS;
+ status = -EOPNOTSUPP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * nfs42_proc_offload_status - Poll completion status of an async copy operation
+ * @dst: handle of file being copied into
+ * @stateid: copy stateid (from async COPY result)
+ * @copied: OUT: number of bytes copied so far
+ *
+ * Return values:
+ * %0: Server returned an NFS4_OK completion status
+ * %-EINPROGRESS: Server returned no completion status
+ * %-EREMOTEIO: Server returned an error completion status
+ * %-EBADF: Server did not recognize the copy stateid
+ * %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS
+ * %-ERESTARTSYS: Wait interrupted by signal
+ *
+ * Other negative errnos indicate the client could not complete the
+ * request.
+ */
+static int
+nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied)
+{
+ struct inode *inode = file_inode(dst);
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs4_exception exception = {
+ .inode = inode,
+ };
+ struct nfs42_offload_data *data;
+ int status;
+
+ if (!(server->caps & NFS_CAP_OFFLOAD_STATUS))
+ return -EOPNOTSUPP;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->seq_server = server;
+ data->args.osa_src_fh = NFS_FH(inode);
+ memcpy(&data->args.osa_stateid, stateid,
+ sizeof(data->args.osa_stateid));
+ exception.stateid = &data->args.osa_stateid;
+ do {
+ status = _nfs42_proc_offload_status(server, dst, data);
+ if (status == -EOPNOTSUPP)
+ goto out;
+ status = nfs4_handle_exception(server, status, &exception);
+ } while (exception.retry);
+ if (status)
+ goto out;
+
+ *copied = data->res.osr_count;
+ if (!data->res.complete_count)
+ status = -EINPROGRESS;
+ else if (data->res.osr_complete != NFS_OK)
+ status = -EREMOTEIO;
+
+out:
+ kfree(data);
+ return status;
+}
+
static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
struct nfs42_copy_notify_args *args,
struct nfs42_copy_notify_res *res)
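handle_async_copy() above now wakes up periodically to poll OFFLOAD_STATUS, doubling the wait after each poll that still reports the copy in progress and capping it at half the lease time. A hedged sketch of that backoff schedule, with illustrative numbers rather than the kernel's values:

/* Hedged sketch of the doubling, lease-capped poll interval. */
#include <stdio.h>

int main(void)
{
	unsigned int lease_secs = 90;		/* assumed server lease time */
	unsigned int wait_secs = 3;		/* initial wait, as in the hunk */
	unsigned int polls = 0;

	/* Pretend the server needs six polls before the copy completes. */
	while (++polls <= 6) {
		printf("poll %u after waiting %us\n", polls, wait_secs);
		wait_secs *= 2;
		if (wait_secs > lease_secs / 2)
			wait_secs = lease_secs / 2;
	}
	return 0;
}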
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 5072d7ea72e9..b1b663468249 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -35,6 +35,11 @@
#define encode_offload_cancel_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_STATEID_SIZE))
#define decode_offload_cancel_maxsz (op_decode_hdr_maxsz)
+#define encode_offload_status_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define decode_offload_status_maxsz (op_decode_hdr_maxsz + \
+ 2 /* osr_count */ + \
+ 2 /* osr_complete */)
#define encode_copy_notify_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_STATEID_SIZE) + \
1 + /* nl4_type */ \
@@ -143,6 +148,14 @@
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_offload_cancel_maxsz)
+#define NFS4_enc_offload_status_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_offload_status_maxsz)
+#define NFS4_dec_offload_status_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_offload_status_maxsz)
#define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -345,6 +358,14 @@ static void encode_offload_cancel(struct xdr_stream *xdr,
encode_nfs4_stateid(xdr, &args->osa_stateid);
}
+static void encode_offload_status(struct xdr_stream *xdr,
+ const struct nfs42_offload_status_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_OFFLOAD_STATUS, decode_offload_status_maxsz, hdr);
+ encode_nfs4_stateid(xdr, &args->osa_stateid);
+}
+
static void encode_copy_notify(struct xdr_stream *xdr,
const struct nfs42_copy_notify_args *args,
struct compound_hdr *hdr)
@@ -570,6 +591,25 @@ static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req,
}
/*
+ * Encode OFFLOAD_STATUS request
+ */
+static void nfs4_xdr_enc_offload_status(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_offload_status_args *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->osa_seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->osa_seq_args, &hdr);
+ encode_putfh(xdr, args->osa_src_fh, &hdr);
+ encode_offload_status(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* Encode COPY_NOTIFY request
*/
static void nfs4_xdr_enc_copy_notify(struct rpc_rqst *req,
@@ -921,6 +961,26 @@ static int decode_offload_cancel(struct xdr_stream *xdr,
return decode_op_hdr(xdr, OP_OFFLOAD_CANCEL);
}
+static int decode_offload_status(struct xdr_stream *xdr,
+ struct nfs42_offload_status_res *res)
+{
+ ssize_t result;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_OFFLOAD_STATUS);
+ if (status)
+ return status;
+ /* osr_count */
+ if (xdr_stream_decode_u64(xdr, &res->osr_count) < 0)
+ return -EIO;
+ /* osr_complete<1> */
+ result = xdr_stream_decode_uint32_array(xdr, &res->osr_complete, 1);
+ if (result < 0)
+ return -EIO;
+ res->complete_count = result;
+ return 0;
+}
+
static int decode_copy_notify(struct xdr_stream *xdr,
struct nfs42_copy_notify_res *res)
{
@@ -1371,6 +1431,32 @@ out:
}
/*
+ * Decode OFFLOAD_STATUS response
+ */
+static int nfs4_xdr_dec_offload_status(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfs42_offload_status_res *res = data;
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->osr_seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_offload_status(xdr, res);
+
+out:
+ return status;
+}
+
+/*
* Decode COPY_NOTIFY response
*/
static int nfs4_xdr_dec_copy_notify(struct rpc_rqst *rqstp,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 83378f69b35e..162c85a83a14 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -233,6 +233,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
if (test_bit(NFS_CS_PNFS, &cl_init->init_flags))
__set_bit(NFS_CS_PNFS, &clp->cl_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &cl_init->init_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags);
/*
	 * Set up the connection to the server before we add it to the
* global list.
@@ -937,6 +939,9 @@ static int nfs4_set_client(struct nfs_server *server,
__set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags);
server->port = rpc_get_port((struct sockaddr *)addr);
+ if (server->flags & NFS_MOUNT_NETUNREACH_FATAL)
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
+
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
if (IS_ERR(clp))
@@ -1011,6 +1016,8 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
__set_bit(NFS_CS_PNFS, &cl_init.init_flags);
cl_init.max_connect = NFS_MAX_TRANSPORTS;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 70c8ea943019..970f28dbf253 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -195,6 +195,9 @@ static int nfs4_map_errors(int err)
return -EBUSY;
case -NFS4ERR_NOT_SAME:
return -ENOTSYNC;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ break;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
@@ -443,6 +446,8 @@ static int nfs4_delay_killable(long *timeout)
{
might_sleep();
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(nfs4_update_delay(timeout));
if (!__fatal_signal_pending(current))
@@ -454,6 +459,8 @@ static int nfs4_delay_interruptible(long *timeout)
{
might_sleep();
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(nfs4_update_delay(timeout));
if (!signal_pending(current))
@@ -1774,7 +1781,8 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
rcu_read_unlock();
trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
- if (!fatal_signal_pending(current)) {
+ if (!fatal_signal_pending(current) &&
+ !nfs_current_task_exiting()) {
if (schedule_timeout(5*HZ) == 0)
status = -EAGAIN;
else
@@ -3576,7 +3584,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
write_sequnlock(&state->seqlock);
trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
- if (fatal_signal_pending(current))
+ if (fatal_signal_pending(current) || nfs_current_task_exiting())
status = -EINTR;
else
if (schedule_timeout(5*HZ) != 0)
@@ -9594,7 +9602,7 @@ static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
return;
trace_nfs4_sequence(clp, task->tk_status);
- if (task->tk_status < 0 && !task->tk_client->cl_shutdown) {
+ if (task->tk_status < 0 && clp->cl_cons_state >= 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
if (refcount_read(&clp->cl_count) == 1)
return;
@@ -10798,7 +10806,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
| NFS_CAP_CLONE
| NFS_CAP_LAYOUTERROR
| NFS_CAP_READ_PLUS
- | NFS_CAP_MOVEABLE,
+ | NFS_CAP_MOVEABLE
+ | NFS_CAP_OFFLOAD_STATUS,
.init_client = nfs41_init_client,
.shutdown_client = nfs41_shutdown_client,
.match_stateid = nfs41_match_stateid,
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 542cdf71229f..7612e977e80b 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1198,7 +1198,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
struct rpc_clnt *clnt = clp->cl_rpcclient;
bool swapon = false;
- if (clnt->cl_shutdown)
+ if (clp->cl_cons_state < 0)
return;
set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
@@ -1403,7 +1403,7 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
clp->cl_hostname);
nfs4_schedule_state_manager(clp);
- return 0;
+ return clp->cl_cons_state < 0 ? clp->cl_cons_state : 0;
}
EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
@@ -2739,7 +2739,15 @@ out_error:
pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
" with error %d\n", section_sep, section,
clp->cl_hostname, -status);
- ssleep(1);
+ switch (status) {
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ nfs_mark_client_ready(clp, -EIO);
+ break;
+ default:
+ ssleep(1);
+ break;
+ }
out_drain:
memalloc_nofs_restore(memflags);
nfs4_end_drain_session(clp);
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 22c973316f0b..bc67fe6801b1 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -2608,7 +2608,7 @@ TRACE_EVENT(nfs4_copy_notify,
)
);
-TRACE_EVENT(nfs4_offload_cancel,
+DECLARE_EVENT_CLASS(nfs4_offload_class,
TP_PROTO(
const struct nfs42_offload_status_args *args,
int error
@@ -2640,6 +2640,15 @@ TRACE_EVENT(nfs4_offload_cancel,
__entry->stateid_seq, __entry->stateid_hash
)
);
+#define DEFINE_NFS4_OFFLOAD_EVENT(name) \
+ DEFINE_EVENT(nfs4_offload_class, name, \
+ TP_PROTO( \
+ const struct nfs42_offload_status_args *args, \
+ int error \
+ ), \
+ TP_ARGS(args, error))
+DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_cancel);
+DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_status);
DECLARE_EVENT_CLASS(nfs4_xattr_event,
TP_PROTO(
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e8ac3f615f93..55bef5fbfa47 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -82,9 +82,8 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
* we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
*/
#define pagepad_maxsz (1)
-#define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2)
-#define lock_owner_id_maxsz (1 + 1 + 4)
-#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+#define open_owner_id_maxsz (2 + 1 + 2 + 2)
+#define lock_owner_id_maxsz (2 + 1 + 2)
#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define op_encode_hdr_maxsz (1)
@@ -185,7 +184,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_claim_null_maxsz (1 + nfs4_name_maxsz)
#define encode_open_maxsz (op_encode_hdr_maxsz + \
2 + encode_share_access_maxsz + 2 + \
- open_owner_id_maxsz + \
+ 1 + open_owner_id_maxsz + \
encode_opentype_maxsz + \
encode_claim_null_maxsz)
#define decode_space_limit_maxsz (3)
@@ -255,13 +254,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
-#define encode_lockowner_maxsz (7)
+#define encode_lockowner_maxsz (2 + 1 + lock_owner_id_maxsz)
+
#define encode_lock_maxsz (op_encode_hdr_maxsz + \
7 + \
1 + encode_stateid_maxsz + 1 + \
encode_lockowner_maxsz)
#define decode_lock_denied_maxsz \
- (8 + decode_lockowner_maxsz)
+ (2 + 2 + 1 + 2 + 1 + lock_owner_id_maxsz)
#define decode_lock_maxsz (op_decode_hdr_maxsz + \
decode_lock_denied_maxsz)
#define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \
@@ -617,7 +617,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
encode_lockowner_maxsz)
#define NFS4_dec_release_lockowner_sz \
(compound_decode_hdr_maxsz + \
- decode_lockowner_maxsz)
+ decode_release_lockowner_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -1412,7 +1412,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
__be32 *p;
/*
* opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
- * owner 4 = 32
+ * owner 28
*/
encode_nfs4_seqid(xdr, arg->seqid);
encode_share_access(xdr, arg->share_access);
@@ -5077,7 +5077,7 @@ static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
/*
* We create the owner, so we know a proper owner.id length is 4.
*/
-static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
+static int decode_lock_denied(struct xdr_stream *xdr, struct file_lock *fl)
{
uint64_t offset, length, clientid;
__be32 *p;
@@ -7702,6 +7702,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC42(CLONE, enc_clone, dec_clone),
PROC42(COPY, enc_copy, dec_copy),
PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel),
+ PROC42(OFFLOAD_STATUS, enc_offload_status, dec_offload_status),
PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify),
PROC(LOOKUPP, enc_lookupp, dec_lookupp),
PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror),
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index aeb715b4a690..9eea9e62afc9 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -454,8 +454,12 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
{ NFS_MOUNT_NONLM, ",nolock", "" },
{ NFS_MOUNT_NOACL, ",noacl", "" },
{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
+ { NFS_MOUNT_FORCE_RDIRPLUS, ",rdirplus=force", "" },
{ NFS_MOUNT_UNSHARED, ",nosharecache", "" },
{ NFS_MOUNT_NORESVPORT, ",noresvport", "" },
+ { NFS_MOUNT_NETUNREACH_FATAL,
+ ",fatal_neterrors=ENETDOWN:ENETUNREACH",
+ ",fatal_neterrors=none" },
{ 0, NULL, NULL }
};
const struct proc_nfs_info *nfs_infop;
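Each entry in the mount-option table above carries one string to print when the flag is set and another (possibly empty) to print when it is clear, which is how fatal_neterrors gains both spellings in the output. A hedged standalone sketch of that table-driven printer (flag values and names here are illustrative):

/* Hedged sketch of the set/unset option-string table pattern. */
#include <stdio.h>

#define MNT_NORDIRPLUS       (1u << 0)
#define MNT_NETUNREACH_FATAL (1u << 1)

struct opt {
	unsigned int flag;
	const char *set;
	const char *unset;
};

static const struct opt opts[] = {
	{ MNT_NORDIRPLUS, ",nordirplus", "" },
	{ MNT_NETUNREACH_FATAL,
	  ",fatal_neterrors=ENETDOWN:ENETUNREACH", ",fatal_neterrors=none" },
	{ 0, NULL, NULL }
};

static void show_opts(unsigned int flags)
{
	const struct opt *o;

	for (o = opts; o->flag; o++)
		fputs(flags & o->flag ? o->set : o->unset, stdout);
	putchar('\n');
}

int main(void)
{
	show_opts(MNT_NETUNREACH_FATAL);	/* ,fatal_neterrors=ENETDOWN:ENETUNREACH */
	show_opts(0);				/* ,fatal_neterrors=none */
	return 0;
}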
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
index 7b59a40d40c0..37cb2b776435 100644
--- a/fs/nfs/sysfs.c
+++ b/fs/nfs/sysfs.c
@@ -14,6 +14,7 @@
#include <linux/rcupdate.h>
#include <linux/lockd/lockd.h>
+#include "internal.h"
#include "nfs4_fs.h"
#include "netns.h"
#include "sysfs.h"
@@ -228,6 +229,25 @@ static void shutdown_client(struct rpc_clnt *clnt)
rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL);
}
+/*
+ * Shut down the nfs_client only once all the superblocks
+ * have been shut down.
+ */
+static void shutdown_nfs_client(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ if (!(server->flags & NFS_MOUNT_SHUTDOWN)) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+ nfs_mark_client_ready(clp, -EIO);
+ shutdown_client(clp->cl_rpcclient);
+}
+
static ssize_t
shutdown_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
@@ -259,7 +279,6 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
server->flags |= NFS_MOUNT_SHUTDOWN;
shutdown_client(server->client);
- shutdown_client(server->nfs_client->cl_rpcclient);
if (!IS_ERR(server->client_acl))
shutdown_client(server->client_acl);
@@ -267,11 +286,44 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
if (server->nlm_host)
shutdown_client(server->nlm_host->h_rpcclnt);
out:
+ shutdown_nfs_client(server->nfs_client);
return count;
}
static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown);
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+static ssize_t
+implid_domain_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid;
+
+ if (!impl_id || strlen(impl_id->domain) == 0)
+		return 0; /* empty attribute: nothing to emit */
+ return sysfs_emit(buf, "%s\n", impl_id->domain);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_implid_domain = __ATTR_RO(implid_domain);
+
+
+static ssize_t
+implid_name_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid;
+
+ if (!impl_id || strlen(impl_id->name) == 0)
+		return 0; /* empty attribute: nothing to emit */
+ return sysfs_emit(buf, "%s\n", impl_id->name);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_implid_name = __ATTR_RO(implid_name);
+
+#endif /* IS_ENABLED(CONFIG_NFS_V4_1) */
+
#define RPC_CLIENT_NAME_SIZE 64
void nfs_sysfs_link_rpc_client(struct nfs_server *server,
@@ -309,6 +361,32 @@ static struct kobj_type nfs_sb_ktype = {
.child_ns_type = nfs_netns_object_child_ns_type,
};
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+static void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
+{
+ int ret;
+
+ if (!server->nfs_client->cl_implid)
+ return;
+
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_domain.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_name.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+}
+#else /* CONFIG_NFS_V4_1 */
+static inline void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
void nfs_sysfs_add_server(struct nfs_server *server)
{
int ret;
@@ -325,6 +403,8 @@ void nfs_sysfs_add_server(struct nfs_server *server)
if (ret < 0)
pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
server->s_sysfs_id, ret);
+
+ nfs_sysfs_add_nfsv41_server(server);
}
EXPORT_SYMBOL_GPL(nfs_sysfs_add_server);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index aa3d8bea3ec0..23df8b214474 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -579,8 +579,10 @@ retry:
while (!nfs_lock_request(head)) {
ret = nfs_wait_on_request(head);
- if (ret < 0)
+ if (ret < 0) {
+ nfs_release_request(head);
return ERR_PTR(ret);
+ }
}
/* Ensure that nobody removed the request before we locked it */
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index af94e3737470..e946f75eb540 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -2664,8 +2664,9 @@ int attr_set_compress(struct ntfs_inode *ni, bool compr)
attr->nres.run_off = cpu_to_le16(run_off);
}
- /* Update data attribute flags. */
+ /* Update attribute flags. */
if (compr) {
+ attr->flags &= ~ATTR_FLAG_SPARSED;
attr->flags |= ATTR_FLAG_COMPRESSED;
attr->nres.c_unit = NTFS_LZNT_CUNIT;
} else {
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 3f96a11804c9..9b6a3f8d2e7c 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -101,8 +101,26 @@ int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
/* Allowed to change compression for empty files and for directories only. */
if (!is_dedup(ni) && !is_encrypted(ni) &&
(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
- /* Change compress state. */
- int err = ni_set_compress(inode, flags & FS_COMPR_FL);
+ int err = 0;
+ struct address_space *mapping = inode->i_mapping;
+
+	/* Write out all data and wait. */
+ filemap_invalidate_lock(mapping);
+ err = filemap_write_and_wait(mapping);
+
+ if (err >= 0) {
+ /* Change compress state. */
+ bool compr = flags & FS_COMPR_FL;
+ err = ni_set_compress(inode, compr);
+
+ /* For files change a_ops too. */
+ if (!err)
+ mapping->a_ops = compr ? &ntfs_aops_cmpr :
+ &ntfs_aops;
+ }
+
+ filemap_invalidate_unlock(mapping);
+
if (err)
return err;
}
@@ -412,6 +430,7 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
}
if (extend_init && !is_compressed(ni)) {
+ WARN_ON(ni->i_valid >= pos);
err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
if (err)
goto out;
@@ -1228,21 +1247,22 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret;
int err;
- err = check_write_restriction(inode);
- if (err)
- return err;
-
- if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
- ntfs_inode_warn(inode, "direct i/o + compressed not supported");
- return -EOPNOTSUPP;
- }
-
if (!inode_trylock(inode)) {
if (iocb->ki_flags & IOCB_NOWAIT)
return -EAGAIN;
inode_lock(inode);
}
+ ret = check_write_restriction(inode);
+ if (ret)
+ goto out;
+
+ if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
+ ntfs_inode_warn(inode, "direct i/o + compressed not supported");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
ret = generic_write_checks(iocb, from);
if (ret <= 0)
goto out;
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 5df6a0b5add9..b7a83200f2cc 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -281,63 +281,6 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
}
/*
- * ni_load_attr - Load attribute that contains given VCN.
- */
-struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, CLST vcn,
- struct mft_inode **pmi)
-{
- struct ATTR_LIST_ENTRY *le;
- struct ATTRIB *attr;
- struct mft_inode *mi;
- struct ATTR_LIST_ENTRY *next;
-
- if (!ni->attr_list.size) {
- if (pmi)
- *pmi = &ni->mi;
- return mi_find_attr(ni, &ni->mi, NULL, type, name, name_len,
- NULL);
- }
-
- le = al_find_ex(ni, NULL, type, name, name_len, NULL);
- if (!le)
- return NULL;
-
- /*
- * Unfortunately ATTR_LIST_ENTRY contains only start VCN.
- * So to find the ATTRIB segment that contains 'vcn' we should
- * enumerate some entries.
- */
- if (vcn) {
- for (;; le = next) {
- next = al_find_ex(ni, le, type, name, name_len, NULL);
- if (!next || le64_to_cpu(next->vcn) > vcn)
- break;
- }
- }
-
- if (ni_load_mi(ni, le, &mi))
- return NULL;
-
- if (pmi)
- *pmi = mi;
-
- attr = mi_find_attr(ni, mi, NULL, type, name, name_len, &le->id);
- if (!attr)
- return NULL;
-
- if (!attr->non_res)
- return attr;
-
- if (le64_to_cpu(attr->nres.svcn) <= vcn &&
- vcn <= le64_to_cpu(attr->nres.evcn))
- return attr;
-
- _ntfs_bad_inode(&ni->vfs_inode);
- return NULL;
-}
-
-/*
* ni_load_all_mi - Load all subrecords.
*/
int ni_load_all_mi(struct ntfs_inode *ni)
@@ -3434,10 +3377,12 @@ int ni_set_compress(struct inode *inode, bool compr)
}
ni->std_fa = std->fa;
- if (compr)
+ if (compr) {
+ std->fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
std->fa |= FILE_ATTRIBUTE_COMPRESSED;
- else
+ } else {
std->fa &= ~FILE_ATTRIBUTE_COMPRESSED;
+ }
if (ni->std_fa != std->fa) {
ni->std_fa = std->fa;
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 938d351ebac7..df81f1f7330c 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -1035,34 +1035,6 @@ struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
return NULL;
}
-int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
-{
- struct block_device *bdev = sb->s_bdev;
- u32 blocksize = sb->s_blocksize;
- u64 block = lbo >> sb->s_blocksize_bits;
- u32 off = lbo & (blocksize - 1);
- u32 op = blocksize - off;
-
- for (; bytes; block += 1, off = 0, op = blocksize) {
- struct buffer_head *bh = __bread(bdev, block, blocksize);
-
- if (!bh)
- return -EIO;
-
- if (op > bytes)
- op = bytes;
-
- memcpy(buffer, bh->b_data + off, op);
-
- put_bh(bh);
-
- bytes -= op;
- buffer = Add2Ptr(buffer, op);
- }
-
- return 0;
-}
-
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
const void *buf, int wait)
{
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 7eb9fae22f8d..78d20e4baa2c 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -618,7 +618,7 @@ static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes)
u32 off = le32_to_cpu(hdr->de_off);
if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot ||
- off + sizeof(struct NTFS_DE) > end) {
+ size_add(off, sizeof(struct NTFS_DE)) > end) {
/* incorrect index buffer. */
return false;
}
@@ -736,7 +736,7 @@ fill_table:
if (end > total)
return NULL;
- if (off + sizeof(struct NTFS_DE) > end)
+ if (size_add(off, sizeof(struct NTFS_DE)) > end)
return NULL;
e = Add2Ptr(hdr, off);
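The switch to size_add() closes an integer-overflow hole: a hostile on-disk offset near the type's maximum can wrap when the entry size is added, letting the naive bounds check pass. A hedged userspace demonstration at 32 bits (the kernel's size_add() saturates at SIZE_MAX rather than wrapping, which the helper below models):

/* Hedged demonstration of the wraparound the size_add() conversion prevents. */
#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE 16u

/* Saturating add, modelling size_add() at 32 bits. */
static uint32_t sat_add(uint32_t a, uint32_t b)
{
	return a > UINT32_MAX - b ? UINT32_MAX : a + b;
}

int main(void)
{
	uint32_t end = 4096;			/* size of the index buffer */
	uint32_t off = UINT32_MAX - 7;		/* hostile on-disk offset */

	uint32_t naive = off + ENTRY_SIZE;	/* wraps to a small value */
	uint32_t safe  = sat_add(off, ENTRY_SIZE);

	printf("naive:      %u > %u ? %s\n", naive, end,
	       naive > end ? "rejected" : "WRONGLY accepted");
	printf("saturating: %u > %u ? %s\n", safe, end,
	       safe > end ? "rejected" : "accepted");
	return 0;
}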
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index a1e11228dafd..3e2957a1e360 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -1025,46 +1025,6 @@ int ntfs_sync_inode(struct inode *inode)
}
/*
- * writeback_inode - Helper function for ntfs_flush_inodes().
- *
- * This writes both the inode and the file data blocks, waiting
- * for in flight data blocks before the start of the call. It
- * does not wait for any io started during the call.
- */
-static int writeback_inode(struct inode *inode)
-{
- int ret = sync_inode_metadata(inode, 0);
-
- if (!ret)
- ret = filemap_fdatawrite(inode->i_mapping);
- return ret;
-}
-
-/*
- * ntfs_flush_inodes
- *
- * Write data and metadata corresponding to i1 and i2. The io is
- * started but we do not wait for any of it to finish.
- *
- * filemap_flush() is used for the block device, so if there is a dirty
- * page for a block already in flight, we will not wait and start the
- * io over again.
- */
-int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
- struct inode *i2)
-{
- int ret = 0;
-
- if (i1)
- ret = writeback_inode(i1);
- if (!ret && i2)
- ret = writeback_inode(i2);
- if (!ret)
- ret = filemap_flush(sb->s_bdev_file->f_mapping);
- return ret;
-}
-
-/*
* Helper function to read file.
*/
int inode_read_data(struct inode *inode, void *data, size_t bytes)
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 241f2ffdd920..1ff13b6f9613 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -717,7 +717,7 @@ static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr)
struct NTFS_DE *e;
u16 esize;
- if (de_off >= used || de_off + sizeof(struct NTFS_DE) > used )
+ if (de_off >= used || size_add(de_off, sizeof(struct NTFS_DE)) > used)
return NULL;
e = Add2Ptr(hdr, de_off);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 382820464dee..d628977e2556 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -530,9 +530,6 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTR_LIST_ENTRY **le,
struct mft_inode **mi);
-struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, CLST vcn,
- struct mft_inode **pmi);
int ni_load_all_mi(struct ntfs_inode *ni);
bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
@@ -619,7 +616,6 @@ enum NTFS_DIRTY_FLAGS {
NTFS_DIRTY_ERROR = 2,
};
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty);
-int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
const void *buffer, int wait);
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
@@ -717,8 +713,6 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
u32 len, u32 copied, struct folio *folio, void *fsdata);
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
int ntfs_sync_inode(struct inode *inode);
-int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
- struct inode *i2);
int inode_read_data(struct inode *inode, void *data, size_t bytes);
int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const struct cpu_str *uni,
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 6a0f6b0a3ab2..920a1ab47b63 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -555,6 +555,55 @@ static const struct proc_ops ntfs3_label_fops = {
.proc_write = ntfs3_label_write,
};
+static void ntfs_create_procdir(struct super_block *sb)
+{
+ struct proc_dir_entry *e;
+
+ if (!proc_info_root)
+ return;
+
+ e = proc_mkdir(sb->s_id, proc_info_root);
+ if (e) {
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ proc_create_data("volinfo", 0444, e,
+ &ntfs3_volinfo_fops, sb);
+ proc_create_data("label", 0644, e,
+ &ntfs3_label_fops, sb);
+ sbi->procdir = e;
+ }
+}
+
+static void ntfs_remove_procdir(struct super_block *sb)
+{
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ if (!sbi->procdir)
+ return;
+
+ remove_proc_entry("label", sbi->procdir);
+ remove_proc_entry("volinfo", sbi->procdir);
+ remove_proc_entry(sb->s_id, proc_info_root);
+ sbi->procdir = NULL;
+}
+
+static void ntfs_create_proc_root(void)
+{
+ proc_info_root = proc_mkdir("fs/ntfs3", NULL);
+}
+
+static void ntfs_remove_proc_root(void)
+{
+ if (proc_info_root) {
+ remove_proc_entry("fs/ntfs3", NULL);
+ proc_info_root = NULL;
+ }
+}
+#else
+static void ntfs_create_procdir(struct super_block *sb) {}
+static void ntfs_remove_procdir(struct super_block *sb) {}
+static void ntfs_create_proc_root(void) {}
+static void ntfs_remove_proc_root(void) {}
#endif
static struct kmem_cache *ntfs_inode_cachep;
@@ -644,15 +693,7 @@ static void ntfs_put_super(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
-#ifdef CONFIG_PROC_FS
- // Remove /proc/fs/ntfs3/..
- if (sbi->procdir) {
- remove_proc_entry("label", sbi->procdir);
- remove_proc_entry("volinfo", sbi->procdir);
- remove_proc_entry(sb->s_id, proc_info_root);
- sbi->procdir = NULL;
- }
-#endif
+ ntfs_remove_procdir(sb);
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
@@ -1590,20 +1631,7 @@ load_root:
kfree(boot2);
}
-#ifdef CONFIG_PROC_FS
- /* Create /proc/fs/ntfs3/.. */
- if (proc_info_root) {
- struct proc_dir_entry *e = proc_mkdir(sb->s_id, proc_info_root);
- static_assert((S_IRUGO | S_IWUSR) == 0644);
- if (e) {
- proc_create_data("volinfo", S_IRUGO, e,
- &ntfs3_volinfo_fops, sb);
- proc_create_data("label", S_IRUGO | S_IWUSR, e,
- &ntfs3_label_fops, sb);
- sbi->procdir = e;
- }
- }
-#endif
+ ntfs_create_procdir(sb);
if (is_legacy_ntfs(sb))
sb->s_flags |= SB_RDONLY;
@@ -1853,14 +1881,11 @@ static int __init init_ntfs_fs(void)
if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
-#ifdef CONFIG_PROC_FS
- /* Create "/proc/fs/ntfs3" */
- proc_info_root = proc_mkdir("fs/ntfs3", NULL);
-#endif
+ ntfs_create_proc_root();
err = ntfs3_init_bitmap();
if (err)
- return err;
+ goto out2;
ntfs_inode_cachep = kmem_cache_create(
"ntfs_inode_cache", sizeof(struct ntfs_inode), 0,
@@ -1880,6 +1905,8 @@ out:
kmem_cache_destroy(ntfs_inode_cachep);
out1:
ntfs3_exit_bitmap();
+out2:
+ ntfs_remove_proc_root();
return err;
}
@@ -1890,11 +1917,7 @@ static void __exit exit_ntfs_fs(void)
unregister_filesystem(&ntfs_fs_type);
unregister_as_ntfs_legacy();
ntfs3_exit_bitmap();
-
-#ifdef CONFIG_PROC_FS
- if (proc_info_root)
- remove_proc_entry("fs/ntfs3", NULL);
-#endif
+ ntfs_remove_proc_root();
}
MODULE_LICENSE("GPL");
diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
index 00b31cf86462..83caa3849749 100644
--- a/fs/smb/server/auth.c
+++ b/fs/smb/server/auth.c
@@ -1016,9 +1016,9 @@ static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
ses_enc_key = enc ? sess->smb3encryptionkey :
sess->smb3decryptionkey;
- if (enc)
- ksmbd_user_session_get(sess);
memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
+ if (!enc)
+ ksmbd_user_session_put(sess);
return 0;
}
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 91c2318639e7..14620e147dda 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -27,6 +27,7 @@ enum {
KSMBD_SESS_EXITING,
KSMBD_SESS_NEED_RECONNECT,
KSMBD_SESS_NEED_NEGOTIATE,
+ KSMBD_SESS_NEED_SETUP,
KSMBD_SESS_RELEASING
};
@@ -187,6 +188,11 @@ static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
}
+static inline bool ksmbd_conn_need_setup(struct ksmbd_conn *conn)
+{
+ return READ_ONCE(conn->status) == KSMBD_SESS_NEED_SETUP;
+}
+
static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
{
return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
@@ -217,6 +223,11 @@ static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
}
+static inline void ksmbd_conn_set_need_setup(struct ksmbd_conn *conn)
+{
+ WRITE_ONCE(conn->status, KSMBD_SESS_NEED_SETUP);
+}
+
static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
{
WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
index 53d308f331af..3f45f28f6f0f 100644
--- a/fs/smb/server/mgmt/user_session.c
+++ b/fs/smb/server/mgmt/user_session.c
@@ -181,7 +181,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
down_write(&sessions_table_lock);
down_write(&conn->session_lock);
xa_for_each(&conn->sessions, id, sess) {
- if (atomic_read(&sess->refcnt) == 0 &&
+ if (atomic_read(&sess->refcnt) <= 1 &&
(sess->state != SMB2_SESSION_VALID ||
time_after(jiffies,
sess->last_active + SMB2_SESSION_TIMEOUT))) {
@@ -233,7 +233,8 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
down_write(&conn->session_lock);
xa_erase(&conn->sessions, sess->id);
up_write(&conn->session_lock);
- ksmbd_session_destroy(sess);
+ if (atomic_dec_and_test(&sess->refcnt))
+ ksmbd_session_destroy(sess);
}
}
}
@@ -252,7 +253,8 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
if (xa_empty(&sess->ksmbd_chann_list)) {
xa_erase(&conn->sessions, sess->id);
hash_del(&sess->hlist);
- ksmbd_session_destroy(sess);
+ if (atomic_dec_and_test(&sess->refcnt))
+ ksmbd_session_destroy(sess);
}
}
up_write(&conn->session_lock);
@@ -328,8 +330,8 @@ void ksmbd_user_session_put(struct ksmbd_session *sess)
if (atomic_read(&sess->refcnt) <= 0)
WARN_ON(1);
- else
- atomic_dec(&sess->refcnt);
+ else if (atomic_dec_and_test(&sess->refcnt))
+ ksmbd_session_destroy(sess);
}
struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
@@ -372,13 +374,13 @@ void destroy_previous_session(struct ksmbd_conn *conn,
ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT);
err = ksmbd_conn_wait_idle_sess_id(conn, id);
if (err) {
- ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP);
goto out;
}
ksmbd_destroy_file_table(&prev_sess->file_table);
prev_sess->state = SMB2_SESSION_EXPIRED;
- ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP);
ksmbd_launch_ksmbd_durable_scavenger();
out:
up_write(&conn->session_lock);
@@ -436,7 +438,7 @@ static struct ksmbd_session *__session_create(int protocol)
xa_init(&sess->rpc_handle_list);
sess->sequence_number = 1;
rwlock_init(&sess->tree_conns_lock);
- atomic_set(&sess->refcnt, 1);
+ atomic_set(&sess->refcnt, 2);
ret = __init_smb2_session(sess);
if (ret)
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 4ddf4300371b..d24d95d15d87 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1249,7 +1249,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
}
conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
- ksmbd_conn_set_need_negotiate(conn);
+ ksmbd_conn_set_need_setup(conn);
err_out:
ksmbd_conn_unlock(conn);
@@ -1271,6 +1271,9 @@ static int alloc_preauth_hash(struct ksmbd_session *sess,
if (sess->Preauth_HashValue)
return 0;
+ if (!conn->preauth_info)
+ return -ENOMEM;
+
sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue,
PREAUTH_HASHVALUE_SIZE, KSMBD_DEFAULT_GFP);
if (!sess->Preauth_HashValue)
@@ -1674,6 +1677,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
ksmbd_debug(SMB, "Received smb2 session setup request\n");
+ if (!ksmbd_conn_need_setup(conn) && !ksmbd_conn_good(conn)) {
+ work->send_no_response = 1;
+ return rc;
+ }
+
WORK_BUFFERS(work, req, rsp);
rsp->StructureSize = cpu_to_le16(9);
@@ -1909,7 +1917,7 @@ out_err:
if (try_delay) {
ksmbd_conn_set_need_reconnect(conn);
ssleep(5);
- ksmbd_conn_set_need_negotiate(conn);
+ ksmbd_conn_set_need_setup(conn);
}
}
smb2_set_err_rsp(work);
@@ -2235,14 +2243,15 @@ int smb2_session_logoff(struct ksmbd_work *work)
return -ENOENT;
}
- ksmbd_destroy_file_table(&sess->file_table);
down_write(&conn->session_lock);
sess->state = SMB2_SESSION_EXPIRED;
up_write(&conn->session_lock);
- ksmbd_free_user(sess->user);
- sess->user = NULL;
- ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+ if (sess->user) {
+ ksmbd_free_user(sess->user);
+ sess->user = NULL;
+ }
+ ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP);
rsp->StructureSize = cpu_to_le16(4);
err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
index 49b128698670..5aa7a66334d9 100644
--- a/fs/smb/server/smbacl.c
+++ b/fs/smb/server/smbacl.c
@@ -270,6 +270,11 @@ static int sid_to_id(struct mnt_idmap *idmap,
return -EIO;
}
+ if (psid->num_subauth == 0) {
+ pr_err("%s: zero subauthorities!\n", __func__);
+ return -EIO;
+ }
+
if (sidtype == SIDOWNER) {
kuid_t uid;
uid_t id;
@@ -1026,7 +1031,9 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
struct dentry *parent = path->dentry->d_parent;
struct mnt_idmap *idmap = mnt_idmap(path->mnt);
int inherited_flags = 0, flags = 0, i, nt_size = 0, pdacl_size;
- int rc = 0, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
+ int rc = 0, pntsd_type, pntsd_size, acl_len, aces_size;
+ unsigned int dacloffset;
+ size_t dacl_struct_end;
u16 num_aces, ace_cnt = 0;
char *aces_base;
bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
@@ -1035,8 +1042,11 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
parent, &parent_pntsd);
if (pntsd_size <= 0)
return -ENOENT;
+
dacloffset = le32_to_cpu(parent_pntsd->dacloffset);
- if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) {
+ if (!dacloffset ||
+ check_add_overflow(dacloffset, sizeof(struct smb_acl), &dacl_struct_end) ||
+ dacl_struct_end > (size_t)pntsd_size) {
rc = -EINVAL;
goto free_parent_pntsd;
}
@@ -1240,7 +1250,9 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
struct smb_ntsd *pntsd = NULL;
struct smb_acl *pdacl;
struct posix_acl *posix_acls;
- int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset;
+ int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size;
+ unsigned int dacl_offset;
+ size_t dacl_struct_end;
struct smb_sid sid;
int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE);
struct smb_ace *ace;
@@ -1259,7 +1271,8 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
dacl_offset = le32_to_cpu(pntsd->dacloffset);
if (!dacl_offset ||
- (dacl_offset + sizeof(struct smb_acl) > pntsd_size))
+ check_add_overflow(dacl_offset, sizeof(struct smb_acl), &dacl_struct_end) ||
+ dacl_struct_end > (size_t)pntsd_size)
goto err_out;
pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 97c4d71115d8..d80f94346199 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -396,32 +396,6 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
goto out;
/*
- * If it's already released don't get it. This avoids to loop
- * in __get_user_pages if userfaultfd_release waits on the
- * caller of handle_userfault to release the mmap_lock.
- */
- if (unlikely(READ_ONCE(ctx->released))) {
- /*
- * Don't return VM_FAULT_SIGBUS in this case, so a non
- * cooperative manager can close the uffd after the
- * last UFFDIO_COPY, without risking to trigger an
- * involuntary SIGBUS if the process was starting the
- * userfaultfd while the userfaultfd was still armed
- * (but after the last UFFDIO_COPY). If the uffd
- * wasn't already closed when the userfault reached
- * this point, that would normally be solved by
- * userfaultfd_must_wait returning 'false'.
- *
- * If we were to return VM_FAULT_SIGBUS here, the non
- * cooperative manager would be instead forced to
- * always call UFFDIO_UNREGISTER before it can safely
- * close the uffd.
- */
- ret = VM_FAULT_NOPAGE;
- goto out;
- }
-
- /*
* Check that we can return VM_FAULT_RETRY.
*
* NOTE: it should become possible to return VM_FAULT_RETRY
@@ -457,6 +431,31 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out;
+ if (unlikely(READ_ONCE(ctx->released))) {
+ /*
+ * If a concurrent release is detected, do not return
+ * VM_FAULT_SIGBUS or VM_FAULT_NOPAGE, but instead always
+ * return VM_FAULT_RETRY with lock released proactively.
+ *
+ * If we were to return VM_FAULT_SIGBUS here, the non
+ * cooperative manager would be instead forced to
+ * always call UFFDIO_UNREGISTER before it can safely
+	 * close the uffd, to avoid an involuntary SIGBUS being triggered.
+ *
+ * If we were to return VM_FAULT_NOPAGE, it would work for
+ * the fault path, in which the lock will be released
+ * later. However for GUP, faultin_page() does nothing
+ * special on NOPAGE, so GUP would spin retrying without
+ * releasing the mmap read lock, causing possible livelock.
+ *
+ * Here only VM_FAULT_RETRY would make sure the mmap lock
+ * be released immediately, so that the thread concurrently
+ * releasing the userfault would always make progress.
+ */
+ release_fault_lock(vmf);
+ goto out;
+ }
+
/* take the reference before dropping the mmap_lock */
userfaultfd_ctx_get(ctx);
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index d1adfba8387e..88a42973fa47 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -227,10 +227,10 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page);
*/
static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
- struct page *page = (struct page *)table;
+ struct ptdesc *ptdesc = (struct ptdesc *)table;
- pagetable_dtor(page_ptdesc(page));
- tlb_remove_page(tlb, page);
+ pagetable_dtor(ptdesc);
+ tlb_remove_page(tlb, ptdesc_page(ptdesc));
}
#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
@@ -493,17 +493,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
-static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
+static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
tlb_remove_table(tlb, pt);
}
-/* Like tlb_remove_ptdesc, but for page-like page directories. */
-static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
-{
- tlb_remove_page(tlb, ptdesc_page(pt));
-}
-
static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
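A caller-side sketch of the reworked API (not part of the patch; the helper name is illustrative and virt_to_ptdesc() is assumed to be available to the caller): code that previously used tlb_remove_page_ptdesc() now hands the ptdesc straight to tlb_remove_ptdesc().

static inline void pte_free_tlb_sketch(struct mmu_gather *tlb, pte_t *pte)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pte);

	/* ptdesc destructor and deferred page free happen via one call */
	tlb_remove_ptdesc(tlb, ptdesc);
}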
diff --git a/include/cxl/event.h b/include/cxl/event.h
index 04edd44bd26f..f9ae1796da85 100644
--- a/include/cxl/event.h
+++ b/include/cxl/event.h
@@ -164,10 +164,99 @@ struct cxl_cper_work_data {
struct cxl_cper_event_rec rec;
};
+#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
+#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
+#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
+#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
+#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
+#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
+#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
+
+/*
+ * The layout of the enumeration and the values matches CXL Agent Type
+ * field in the UEFI 2.10 Section N.2.13,
+ */
+enum {
+ RCD, /* Restricted CXL Device */
+ RCH_DP, /* Restricted CXL Host Downstream Port */
+ DEVICE, /* CXL Device */
+ LD, /* CXL Logical Device */
+ FMLD, /* CXL Fabric Manager managed Logical Device */
+ RP, /* CXL Root Port */
+ DSP, /* CXL Downstream Switch Port */
+ USP, /* CXL Upstream Switch Port */
+};
+
+#pragma pack(1)
+
+/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
+struct cxl_cper_sec_prot_err {
+ u64 valid_bits;
+ u8 agent_type;
+ u8 reserved[7];
+
+ /*
+ * Except for RCH Downstream Port, all the remaining CXL Agent
+ * types are uniquely identified by the PCIe compatible SBDF number.
+ */
+ union {
+ u64 rcrb_base_addr;
+ struct {
+ u8 function;
+ u8 device;
+ u8 bus;
+ u16 segment;
+ u8 reserved_1[3];
+ };
+ } agent_addr;
+
+ struct {
+ u16 vendor_id;
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_id;
+ u8 class_code[2];
+ u16 slot;
+ u8 reserved_1[4];
+ } device_id;
+
+ struct {
+ u32 lower_dw;
+ u32 upper_dw;
+ } dev_serial_num;
+
+ u8 capability[60];
+ u16 dvsec_len;
+ u16 err_len;
+ u8 reserved_2[4];
+};
+
+#pragma pack()
+
+/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
+struct cxl_ras_capability_regs {
+ u32 uncor_status;
+ u32 uncor_mask;
+ u32 uncor_severity;
+ u32 cor_status;
+ u32 cor_mask;
+ u32 cap_control;
+ u32 header_log[16];
+};
+
+struct cxl_cper_prot_err_work_data {
+ struct cxl_cper_sec_prot_err prot_err;
+ struct cxl_ras_capability_regs ras_cap;
+ int severity;
+};
+
#ifdef CONFIG_ACPI_APEI_GHES
int cxl_cper_register_work(struct work_struct *work);
int cxl_cper_unregister_work(struct work_struct *work);
int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd);
+int cxl_cper_register_prot_err_work(struct work_struct *work);
+int cxl_cper_unregister_prot_err_work(struct work_struct *work);
+int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd);
#else
static inline int cxl_cper_register_work(struct work_struct *work)
{
@@ -182,6 +271,18 @@ static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
{
return 0;
}
+static inline int cxl_cper_register_prot_err_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_unregister_prot_err_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
+{
+ return 0;
+}
#endif
#endif /* _LINUX_CXL_EVENT_H */
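A hedged consumer sketch for the new protocol-error hooks, mirroring how the existing CXL CPER event work/kfifo pair is consumed (the function and work names here are illustrative, not from the patch):

static void cxl_prot_err_work_fn(struct work_struct *work)
{
	struct cxl_cper_prot_err_work_data wd;

	while (cxl_cper_prot_err_kfifo_get(&wd)) {
		/* wd.prot_err, wd.ras_cap and wd.severity describe one record */
	}
}
static DECLARE_WORK(cxl_prot_err_work, cxl_prot_err_work_fn);

static int __init cxl_prot_err_consumer_init(void)
{
	return cxl_cper_register_prot_err_work(&cxl_prot_err_work);
}

static void __exit cxl_prot_err_consumer_exit(void)
{
	cxl_cper_unregister_prot_err_work(&cxl_prot_err_work);
}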
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a70e62d69dc7..3f2e93ed9730 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1094,6 +1094,17 @@ static inline acpi_handle acpi_get_processor_handle(int cpu)
#endif /* !CONFIG_ACPI */
+#ifdef CONFIG_ACPI_HMAT
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+ resource_size_t *size);
+#else
+static inline int hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *size)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
extern void arch_post_acpi_subsys_init(void);
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
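A possible caller shape for the new HMAT helper (illustrative only; backing_res and nid are assumed to come from the caller). The !CONFIG_ACPI_HMAT stub keeps such callers building by returning -EOPNOTSUPP:

	resource_size_t cache_size = 0;

	/* backing_res describes the memory region fronted by the cache */
	if (hmat_get_extended_linear_cache_size(backing_res, nid, &cache_size))
		cache_size = 0;	/* no HMAT data, or HMAT support not built in */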
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ba8f52d48b94..204b22a99c4b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -184,6 +184,12 @@ static inline void bvec_iter_advance_single(const struct bio_vec *bv,
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
+#define for_each_mp_bvec(bvl, bio_vec, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = mp_bvec_iter_bvec((bio_vec), (iter))), 1); \
+ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
+
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
{ \
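A hedged usage sketch for the new multi-page iterator (the bio pointer is assumed; this is not from the patch). Unlike the single-page for_each_bvec(), each bvec yielded here may cover more than one page:

	struct bio_vec bv;
	struct bvec_iter iter;

	for_each_mp_bvec(bv, bio->bi_io_vec, iter, bio->bi_iter)
		pr_debug("segment: %u bytes at offset %u\n",
			 bv.bv_len, bv.bv_offset);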
diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
index c50b5670c4a5..197916ee91a4 100644
--- a/include/linux/context_tracking_irq.h
+++ b/include/linux/context_tracking_irq.h
@@ -10,12 +10,12 @@ void ct_irq_exit_irqson(void);
void ct_nmi_enter(void);
void ct_nmi_exit(void);
#else
-static inline void ct_irq_enter(void) { }
-static inline void ct_irq_exit(void) { }
+static __always_inline void ct_irq_enter(void) { }
+static __always_inline void ct_irq_exit(void) { }
static inline void ct_irq_enter_irqson(void) { }
static inline void ct_irq_exit_irqson(void) { }
-static inline void ct_nmi_enter(void) { }
-static inline void ct_nmi_exit(void) { }
+static __always_inline void ct_nmi_enter(void) { }
+static __always_inline void ct_nmi_exit(void) { }
#endif
#endif
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 265b0f8fc0b3..0ed60a91eca9 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -89,6 +89,10 @@ enum {
#define CPER_NOTIFY_DMAR \
GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
0x72, 0x2D, 0xEB, 0x41)
+/* CXL Protocol Error Section */
+#define CPER_SEC_CXL_PROT_ERR \
+ GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
+ 0x4B, 0x77, 0x10, 0x48)
/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
/*
@@ -601,4 +605,8 @@ void cper_estatus_print(const char *pfx,
int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
+struct cxl_cper_sec_prot_err;
+void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err);
+
#endif
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 99f30c7d6208..897d6211c163 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -417,8 +417,6 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
void fsl_mc_portal_free(struct fsl_mc_io *mc_io);
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io);
-
int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_device **new_mc_adev);
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index e6723fa95160..0634a3de1782 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -21,7 +21,6 @@ struct io_uring_cmd {
struct io_uring_cmd_data {
void *op_data;
- struct io_uring_sqe sqes[2];
};
static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 02fe001feebb..68416b135151 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -79,6 +79,11 @@ struct vm_fault;
#define IOMAP_F_ATOMIC_BIO (1U << 8)
/*
+ * Flag reserved for file system specific usage
+ */
+#define IOMAP_F_PRIVATE (1U << 12)
+
+/*
* Flags set by the core iomap code during operations:
*
* IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
@@ -88,14 +93,8 @@ struct vm_fault;
* range it covers needs to be remapped by the high level before the operation
* can proceed.
*/
-#define IOMAP_F_SIZE_CHANGED (1U << 8)
-#define IOMAP_F_STALE (1U << 9)
-
-/*
- * Flags from 0x1000 up are for file system specific usage:
- */
-#define IOMAP_F_PRIVATE (1U << 12)
-
+#define IOMAP_F_SIZE_CHANGED (1U << 14)
+#define IOMAP_F_STALE (1U << 15)
/*
* Magic value for addr:
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 905a2e2f45f6..ecbf819deeca 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -104,7 +104,7 @@ extern int kdb_initial_cpu;
#define KDB_NOENVVALUE (-6)
#define KDB_NOTIMP (-7)
#define KDB_ENVFULL (-8)
-#define KDB_ENVBUFFULL (-9)
+#define KDB_KMALLOCFAILED (-9)
#define KDB_TOOMANYBPT (-10)
#define KDB_TOOMANYDBREGS (-11)
#define KDB_DUPBPT (-12)
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 51ef131e66b7..5eebbe7a3545 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -257,7 +257,6 @@ extern void kgdb_arch_late(void);
* hardware breakpoints.
* @correct_hw_break: Allow an architecture to specify how to correct the
* hardware debug registers.
- * @enable_nmi: Manage NMI-triggered entry to KGDB
*/
struct kgdb_arch {
unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
@@ -270,8 +269,6 @@ struct kgdb_arch {
void (*disable_hw_break)(struct pt_regs *regs);
void (*remove_all_hw_break)(void);
void (*correct_hw_break)(void);
-
- void (*enable_nmi)(bool on);
};
/**
@@ -306,14 +303,6 @@ extern const struct kgdb_arch arch_kgdb_ops;
extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
-#ifdef CONFIG_SERIAL_KGDB_NMI
-extern int kgdb_register_nmi_console(void);
-extern int kgdb_unregister_nmi_console(void);
-#else
-static inline int kgdb_register_nmi_console(void) { return 0; }
-static inline int kgdb_unregister_nmi_console(void) { return 0; }
-#endif
-
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
extern struct kgdb_io *dbg_io_ops;
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 5c8865bb59d9..b11660b706c5 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -134,10 +134,6 @@
.size name, .-name
#endif
-/* If symbol 'name' is treated as a subroutine (gets called, and returns)
- * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of
- * static analysis tools such as stack depth analyzer.
- */
#ifndef ENDPROC
/* deprecated, use SYM_FUNC_END */
#define ENDPROC(name) \
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index 068ae1c0f0e8..27883af44f87 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -60,11 +60,6 @@
#define RTC_PDN2 0x002e
#define RTC_PDN2_PWRON_ALARM BIT(4)
-#define RTC_MIN_YEAR 1968
-#define RTC_BASE_YEAR 1900
-#define RTC_NUM_YEARS 128
-#define RTC_MIN_YEAR_OFFSET (RTC_MIN_YEAR - RTC_BASE_YEAR)
-
#define MTK_RTC_POLL_DELAY_US 10
#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d66bc0e97632..b7f13f087954 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4238,4 +4238,14 @@ int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *st
int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
+
+/*
+ * mseal of userspace process's system mappings.
+ */
+#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
+#define VM_SEALED_SYSMAP VM_SEALED
+#else
+#define VM_SEALED_SYSMAP VM_NONE
+#endif
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 9ac83ca88326..d8cad844870a 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -300,6 +300,7 @@ enum nfsstat4 {
/* error codes for internal client use */
#define NFS4ERR_RESET_TO_MDS 12001
#define NFS4ERR_RESET_TO_PNFS 12002
+#define NFS4ERR_FATAL_IOERROR 12003
static inline bool seqid_mutating_err(u32 err)
{
@@ -691,6 +692,7 @@ enum {
NFSPROC4_CLNT_LISTXATTRS,
NFSPROC4_CLNT_REMOVEXATTR,
NFSPROC4_CLNT_READ_PLUS,
+ NFSPROC4_CLNT_OFFLOAD_STATUS,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index f00bfcee7120..71319637a84e 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -50,6 +50,7 @@ struct nfs_client {
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
#define NFS_CS_PNFS 9 /* - Server used for pnfs */
+#define NFS_CS_NETUNREACH_FATAL 10 /* - ENETUNREACH errors are fatal */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -167,6 +168,8 @@ struct nfs_server {
#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
#define NFS_MOUNT_SHUTDOWN 0x08000000
#define NFS_MOUNT_NO_ALIGNWRITE 0x10000000
+#define NFS_MOUNT_FORCE_RDIRPLUS 0x20000000
+#define NFS_MOUNT_NETUNREACH_FATAL 0x40000000
unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
@@ -250,6 +253,10 @@ struct nfs_server {
struct list_head ss_copies;
struct list_head ss_src_copies;
+ unsigned long delegation_flags;
+#define NFS4SERV_DELEGRETURN (1)
+#define NFS4SERV_DELEGATION_EXPIRED (2)
+#define NFS4SERV_DELEGRETURN_DELAYED (3)
unsigned long delegation_gen;
unsigned long mig_gen;
unsigned long mig_status;
@@ -289,6 +296,7 @@ struct nfs_server {
#define NFS_CAP_CASE_INSENSITIVE (1U << 6)
#define NFS_CAP_CASE_PRESERVING (1U << 7)
#define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8)
+#define NFS_CAP_OFFLOAD_STATUS (1U << 9)
#define NFS_CAP_OPEN_XOR (1U << 12)
#define NFS_CAP_DELEGTIME (1U << 13)
#define NFS_CAP_POSIX_LOCK (1U << 14)
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index d66c61cbbd1d..67f6632f723b 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1515,8 +1515,9 @@ struct nfs42_offload_status_args {
struct nfs42_offload_status_res {
struct nfs4_sequence_res osr_seq_res;
- uint64_t osr_count;
- int osr_status;
+ u64 osr_count;
+ int complete_count;
+ u32 osr_complete;
};
struct nfs42_copy_notify_args {
diff --git a/include/linux/node.h b/include/linux/node.h
index 9a881c2208b3..2b7517892230 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -57,6 +57,11 @@ enum cache_write_policy {
NODE_CACHE_WRITE_OTHER,
};
+enum cache_mode {
+ NODE_CACHE_ADDR_MODE_RESERVED,
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR,
+};
+
/**
* struct node_cache_attrs - system memory caching attributes
*
@@ -65,6 +70,7 @@ enum cache_write_policy {
* @size: Total size of cache in bytes
* @line_size: Number of bytes fetched on a cache miss
* @level: The cache hierarchy level
+ * @address_mode: The address mode
*/
struct node_cache_attrs {
enum cache_indexing indexing;
@@ -72,6 +78,7 @@ struct node_cache_attrs {
u64 size;
u16 line_size;
u8 level;
+ u16 address_mode;
};
#ifdef CONFIG_HMEM_REPORTING
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
index 3ca965a2ddc8..366ad004d794 100644
--- a/include/linux/objtool.h
+++ b/include/linux/objtool.h
@@ -69,7 +69,7 @@
* In asm, there are two kinds of code: normal C-type callable functions and
* the rest. The normal callable functions can be called by other code, and
* don't do anything unusual with the stack. Such normal callable functions
- * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
+ * are annotated with SYM_FUNC_{START,END}. Most asm code falls in this
* category. In this case, no special debugging annotations are needed because
* objtool can automatically generate the ORC data for the ORC unwinder to read
* at runtime.
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5bd9492a66ee..e6a21b62dcce 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -226,11 +226,48 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
}
return page;
}
+
+static __always_inline bool page_count_writable(const struct page *page, int u)
+{
+ if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
+ return true;
+
+ /*
+ * The refcount check is ordered before the fake-head check to prevent
+ * the following race:
+ * CPU 1 (HVO) CPU 2 (speculative PFN walker)
+ *
+ * page_ref_freeze()
+ * synchronize_rcu()
+ * rcu_read_lock()
+ * page_is_fake_head() is false
+ * vmemmap_remap_pte()
+ * XXX: struct page[] becomes r/o
+ *
+ * page_ref_unfreeze()
+ * page_ref_count() is not zero
+ *
+ * atomic_add_unless(&page->_refcount)
+ * XXX: try to modify r/o struct page[]
+ *
+ * The refcount check also prevents modification attempts to other (r/o)
+ * tail pages that are not fake heads.
+ */
+ if (atomic_read_acquire(&page->_refcount) == u)
+ return false;
+
+ return page_fixed_fake_head(page) == page;
+}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
return page;
}
+
+static inline bool page_count_writable(const struct page *page, int u)
+{
+ return true;
+}
#endif
static __always_inline int page_is_fake_head(const struct page *page)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 8c236c651d1d..544150d1d5fd 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -234,7 +234,7 @@ static inline bool page_ref_add_unless(struct page *page, int nr, int u)
rcu_read_lock();
/* avoid writing to the vmemmap area being remapped */
- if (!page_is_fake_head(page) && page_ref_count(page) != u)
+ if (page_count_writable(page, u))
ret = atomic_add_unless(&page->_refcount, nr, u);
rcu_read_unlock();
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index ecf290a0c98f..1f4e4f2b89bb 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -5046,6 +5046,7 @@ struct ec_response_pd_status {
#define PD_EVENT_DATA_SWAP BIT(3)
#define PD_EVENT_TYPEC BIT(4)
#define PD_EVENT_PPM BIT(5)
+#define PD_EVENT_INIT BIT(6)
struct ec_response_host_event_status {
uint32_t status; /* PD MCU host event status */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f8159f8a7d73..120536f4c6eb 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -132,7 +132,7 @@ static inline void rcu_sysrq_end(void) { }
#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
void rcu_irq_work_resched(void);
#else
-static inline void rcu_irq_work_resched(void) { }
+static __always_inline void rcu_irq_work_resched(void) { }
#endif
#ifdef CONFIG_RCU_NOCB_CPU
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 3f4d315aaec9..95da051fb155 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -170,6 +170,7 @@ struct rtc_device {
/* useful timestamps */
#define RTC_TIMESTAMP_BEGIN_0000 -62167219200ULL /* 0000-01-01 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */
+#define RTC_TIMESTAMP_EPOCH_GPS 315964800LL /* 1980-01-06 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
#define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */
#define RTC_TIMESTAMP_END_2079 3471292799LL /* 2079-12-31 23:59:59 */
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
index fb1e295e7e63..166b19af956f 100644
--- a/include/linux/sched/smt.h
+++ b/include/linux/sched/smt.h
@@ -12,7 +12,7 @@ static __always_inline bool sched_smt_active(void)
return static_branch_likely(&sched_smt_present);
}
#else
-static inline bool sched_smt_active(void) { return false; }
+static __always_inline bool sched_smt_active(void) { return false; }
#endif
void arch_smt_update(void);
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index fe41da005970..52791e070506 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -167,8 +167,8 @@ extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str,
const void *buf, size_t len, bool ascii);
#ifdef CONFIG_BINARY_PRINTF
-extern int
-seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
+__printf(2, 0)
+int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
#endif
void seq_buf_do_printk(struct seq_buf *s, const char *lvl);
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 2fb266ea69fa..d6ebf0596510 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -181,6 +181,7 @@ int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
#ifdef CONFIG_BINARY_PRINTF
+__printf(2, 0)
void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
#endif
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index ff78efc1f60d..34562eb99931 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -84,7 +84,6 @@ enum serdev_parity {
struct serdev_controller_ops {
ssize_t (*write_buf)(struct serdev_controller *, const u8 *, size_t);
void (*write_flush)(struct serdev_controller *);
- int (*write_room)(struct serdev_controller *);
int (*open)(struct serdev_controller *);
void (*close)(struct serdev_controller *);
void (*set_flow_control)(struct serdev_controller *, bool);
@@ -212,7 +211,6 @@ int serdev_device_break_ctl(struct serdev_device *serdev, int break_state);
void serdev_device_write_wakeup(struct serdev_device *);
ssize_t serdev_device_write(struct serdev_device *, const u8 *, size_t, long);
void serdev_device_write_flush(struct serdev_device *);
-int serdev_device_write_room(struct serdev_device *);
/*
* serdev device driver functions
@@ -273,10 +271,6 @@ static inline ssize_t serdev_device_write(struct serdev_device *sdev,
return -ENODEV;
}
static inline void serdev_device_write_flush(struct serdev_device *sdev) {}
-static inline int serdev_device_write_room(struct serdev_device *sdev)
-{
- return 0;
-}
#define serdev_device_driver_register(x)
#define serdev_device_driver_unregister(x)
diff --git a/include/linux/sort.h b/include/linux/sort.h
index e163287ac6c1..8e5603b10941 100644
--- a/include/linux/sort.h
+++ b/include/linux/sort.h
@@ -13,4 +13,15 @@ void sort(void *base, size_t num, size_t size,
cmp_func_t cmp_func,
swap_func_t swap_func);
+/* Versions that periodically call cond_resched(): */
+
+void sort_r_nonatomic(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv);
+
+void sort_nonatomic(void *base, size_t num, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func);
+
#endif
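A short usage sketch for the non-atomic variant (the array name and comparator are placeholders): it behaves like sort(), but periodically calls cond_resched(), so it must only be used from process context where sleeping is allowed.

static int cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return x < y ? -1 : x > y;
}

	/* in process context: */
	sort_nonatomic(values, nr_values, sizeof(*values), cmp_u32, NULL);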
diff --git a/include/linux/string.h b/include/linux/string.h
index 0403a4ca4c11..01621ad0f598 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -336,8 +336,8 @@ int __sysfs_match_string(const char * const *array, size_t n, const char *s);
#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s)
#ifdef CONFIG_BINARY_PRINTF
-int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
-int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
+__printf(3, 0) int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+__printf(3, 0) int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index fec976e58174..f8b406b0a1af 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -64,7 +64,9 @@ struct rpc_clnt {
cl_noretranstimeo: 1,/* No retransmit timeouts */
cl_autobind : 1,/* use getport() */
cl_chatty : 1,/* be verbose */
- cl_shutdown : 1;/* rpc immediate -EIO */
+ cl_shutdown : 1,/* rpc immediate -EIO */
+ cl_netunreach_fatal : 1;
+ /* Treat ENETUNREACH errors as fatal */
struct xprtsec_parms cl_xprtsec; /* transport security policy */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
@@ -175,6 +177,7 @@ struct rpc_add_xprt_test {
#define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
#define RPC_CLNT_CREATE_REUSEPORT (1UL << 11)
#define RPC_CLNT_CREATE_CONNECTED (1UL << 12)
+#define RPC_CLNT_CREATE_NETUNREACH_FATAL (1UL << 13)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index eac57914dcf3..ccba79ebf893 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -134,6 +134,7 @@ struct rpc_task_setup {
#define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */
#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
+#define RPC_TASK_NETUNREACH_FATAL 0x0040 /* ENETUNREACH is fatal */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index e411368cdacf..e4db5022fe92 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -56,6 +56,7 @@ extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt);
extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt, bool offline);
+extern struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps);
extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
diff --git a/include/linux/trace.h b/include/linux/trace.h
index fdcd76b7be83..7eaad857dee0 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -72,8 +72,8 @@ static inline int unregister_ftrace_export(struct trace_export *export)
static inline void trace_printk_init_buffers(void)
{
}
-static inline int trace_array_printk(struct trace_array *tr, unsigned long ip,
- const char *fmt, ...)
+static inline __printf(3, 4)
+int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...)
{
return 0;
}
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 1ef95c0287f0..a93ed5ac3226 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -88,8 +88,8 @@ extern __printf(2, 3)
void trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern __printf(2, 0)
void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
-extern void
-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+extern __printf(2, 0)
+void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
int cnt);
@@ -113,8 +113,8 @@ static inline __printf(2, 3)
void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
}
-static inline void
-trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+static inline __printf(2, 0)
+void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
}
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2372f9357240..0a46e4054dec 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -239,7 +239,6 @@ struct tty_struct {
struct list_head tty_files;
-#define N_TTY_BUF_SIZE 4096
struct work_struct SAK_work;
} __randomize_layout;
@@ -251,7 +250,7 @@ struct tty_file_private {
};
/**
- * DOC: TTY Struct Flags
+ * enum tty_struct_flags - TTY Struct Flags
*
* These bits are used in the :c:member:`tty_struct.flags` field.
*
@@ -260,62 +259,64 @@ struct tty_file_private {
* tty->write. Thus, you must use the inline functions set_bit() and
* clear_bit() to make things atomic.
*
- * TTY_THROTTLED
+ * @TTY_THROTTLED:
* Driver input is throttled. The ldisc should call
* :c:member:`tty_driver.unthrottle()` in order to resume reception when
* it is ready to process more data (at threshold min).
*
- * TTY_IO_ERROR
+ * @TTY_IO_ERROR:
* If set, causes all subsequent userspace read/write calls on the tty to
* fail, returning -%EIO. (May be no ldisc too.)
*
- * TTY_OTHER_CLOSED
+ * @TTY_OTHER_CLOSED:
* Device is a pty and the other side has closed.
*
- * TTY_EXCLUSIVE
+ * @TTY_EXCLUSIVE:
* Exclusive open mode (a single opener).
*
- * TTY_DO_WRITE_WAKEUP
+ * @TTY_DO_WRITE_WAKEUP:
* If set, causes the driver to call the
* :c:member:`tty_ldisc_ops.write_wakeup()` method in order to resume
* transmission when it can accept more data to transmit.
*
- * TTY_LDISC_OPEN
+ * @TTY_LDISC_OPEN:
* Indicates that a line discipline is open. For debugging purposes only.
*
- * TTY_PTY_LOCK
+ * @TTY_PTY_LOCK:
* A flag private to pty code to implement %TIOCSPTLCK/%TIOCGPTLCK logic.
*
- * TTY_NO_WRITE_SPLIT
+ * @TTY_NO_WRITE_SPLIT:
* Prevent driver from splitting up writes into smaller chunks (preserve
* write boundaries to driver).
*
- * TTY_HUPPED
+ * @TTY_HUPPED:
* The TTY was hung up. This is set post :c:member:`tty_driver.hangup()`.
*
- * TTY_HUPPING
+ * @TTY_HUPPING:
* The TTY is in the process of hanging up to abort potential readers.
*
- * TTY_LDISC_CHANGING
+ * @TTY_LDISC_CHANGING:
* Line discipline for this TTY is being changed. I/O should not block
* when this is set. Use tty_io_nonblock() to check.
*
- * TTY_LDISC_HALTED
+ * @TTY_LDISC_HALTED:
* Line discipline for this TTY was stopped. No work should be queued to
* this ldisc.
*/
-#define TTY_THROTTLED 0
-#define TTY_IO_ERROR 1
-#define TTY_OTHER_CLOSED 2
-#define TTY_EXCLUSIVE 3
-#define TTY_DO_WRITE_WAKEUP 5
-#define TTY_LDISC_OPEN 11
-#define TTY_PTY_LOCK 16
-#define TTY_NO_WRITE_SPLIT 17
-#define TTY_HUPPED 18
-#define TTY_HUPPING 19
-#define TTY_LDISC_CHANGING 20
-#define TTY_LDISC_HALTED 22
+enum tty_struct_flags {
+ TTY_THROTTLED,
+ TTY_IO_ERROR,
+ TTY_OTHER_CLOSED,
+ TTY_EXCLUSIVE,
+ TTY_DO_WRITE_WAKEUP,
+ TTY_LDISC_OPEN,
+ TTY_PTY_LOCK,
+ TTY_NO_WRITE_SPLIT,
+ TTY_HUPPED,
+ TTY_HUPPING,
+ TTY_LDISC_CHANGING,
+ TTY_LDISC_HALTED,
+};
static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file)
{
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index d4cdc089f6c3..188ee9b768eb 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -17,6 +17,92 @@ struct serial_icounter_struct;
struct serial_struct;
/**
+ * enum tty_driver_flag -- TTY Driver Flags
+ *
+ * These are flags passed to tty_alloc_driver().
+ *
+ * @TTY_DRIVER_INSTALLED:
+ *	Whether this driver was successfully installed. This is a tty internal
+ * flag. Do not touch.
+ *
+ * @TTY_DRIVER_RESET_TERMIOS:
+ * Requests the tty layer to reset the termios setting when the last
+ * process has closed the device. Used for PTYs, in particular.
+ *
+ * @TTY_DRIVER_REAL_RAW:
+ * Indicates that the driver will guarantee not to set any special
+ * character handling flags if this is set for the tty:
+ *
+ * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)``
+ *
+ * That is, if there is no reason for the driver to
+ * send notifications of parity and break characters up to the line
+ * driver, it won't do so. This allows the line driver to optimize for
+ * this case if this flag is set. (Note that there is also a promise, if
+ * the above case is true, not to signal overruns, either.)
+ *
+ * @TTY_DRIVER_DYNAMIC_DEV:
+ * The individual tty devices need to be registered with a call to
+ * tty_register_device() when the device is found in the system and
+ * unregistered with a call to tty_unregister_device() so the devices will
+ *	show up properly in sysfs. If not set, all &tty_driver.num entries
+ * will be created by the tty core in sysfs when tty_register_driver() is
+ * called. This is to be used by drivers that have tty devices that can
+ * appear and disappear while the main tty driver is registered with the
+ * tty core.
+ *
+ * @TTY_DRIVER_DEVPTS_MEM:
+ * Don't use the standard arrays (&tty_driver.ttys and
+ * &tty_driver.termios), instead use dynamic memory keyed through the
+ * devpts filesystem. This is only applicable to the PTY driver.
+ *
+ * @TTY_DRIVER_HARDWARE_BREAK:
+ * Hardware handles break signals. Pass the requested timeout to the
+ * &tty_operations.break_ctl instead of using a simple on/off interface.
+ *
+ * @TTY_DRIVER_DYNAMIC_ALLOC:
+ * Do not allocate structures which are needed per line for this driver
+ * (&tty_driver.ports) as it would waste memory. The driver will take
+ * care. This is only applicable to the PTY driver.
+ *
+ * @TTY_DRIVER_UNNUMBERED_NODE:
+ * Do not create numbered ``/dev`` nodes. For example, create
+ * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a
+ * driver for a single tty device is being allocated.
+ */
+enum tty_driver_flag {
+ TTY_DRIVER_INSTALLED = BIT(0),
+ TTY_DRIVER_RESET_TERMIOS = BIT(1),
+ TTY_DRIVER_REAL_RAW = BIT(2),
+ TTY_DRIVER_DYNAMIC_DEV = BIT(3),
+ TTY_DRIVER_DEVPTS_MEM = BIT(4),
+ TTY_DRIVER_HARDWARE_BREAK = BIT(5),
+ TTY_DRIVER_DYNAMIC_ALLOC = BIT(6),
+ TTY_DRIVER_UNNUMBERED_NODE = BIT(7),
+};
+
+enum tty_driver_type {
+ TTY_DRIVER_TYPE_SYSTEM,
+ TTY_DRIVER_TYPE_CONSOLE,
+ TTY_DRIVER_TYPE_SERIAL,
+ TTY_DRIVER_TYPE_PTY,
+ TTY_DRIVER_TYPE_SCC,
+ TTY_DRIVER_TYPE_SYSCONS,
+};
+
+enum tty_driver_subtype {
+ SYSTEM_TYPE_TTY = 1,
+ SYSTEM_TYPE_CONSOLE,
+ SYSTEM_TYPE_SYSCONS,
+ SYSTEM_TYPE_SYSPTMX,
+
+ PTY_TYPE_MASTER = 1,
+ PTY_TYPE_SLAVE,
+
+ SERIAL_TYPE_NORMAL = 1,
+};
+
+/**
* struct tty_operations -- interface between driver and tty
*
* @lookup: ``struct tty_struct *()(struct tty_driver *self, struct file *,
@@ -414,8 +500,8 @@ struct tty_operations {
* @major: major /dev device number (zero for autoassignment)
* @minor_start: the first minor /dev device number
* @num: number of devices allocated
- * @type: type of tty driver (%TTY_DRIVER_TYPE_)
- * @subtype: subtype of tty driver (%SYSTEM_TYPE_, %PTY_TYPE_, %SERIAL_TYPE_)
+ * @type: type of tty driver (enum tty_driver_type)
+ * @subtype: subtype of tty driver (enum tty_driver_subtype)
* @init_termios: termios to set to each tty initially (e.g. %tty_std_termios)
* @flags: tty driver flags (%TTY_DRIVER_)
* @proc_entry: proc fs entry, used internally
@@ -447,8 +533,8 @@ struct tty_driver {
int major;
int minor_start;
unsigned int num;
- short type;
- short subtype;
+ enum tty_driver_type type;
+ enum tty_driver_subtype subtype;
struct ktermios init_termios;
unsigned long flags;
struct proc_dir_entry *proc_entry;
@@ -478,7 +564,13 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line);
void tty_driver_kref_put(struct tty_driver *driver);
-/* Use TTY_DRIVER_* flags below */
+/**
+ * tty_alloc_driver - allocate tty driver
+ * @lines: count of lines this driver can handle at most
+ * @flags: some of enum tty_driver_flag, will be set in driver->flags
+ *
+ * Returns: struct tty_driver or a PTR-encoded error (use IS_ERR() and friends).
+ */
#define tty_alloc_driver(lines, flags) \
__tty_alloc_driver(lines, THIS_MODULE, flags)
@@ -494,84 +586,6 @@ static inline void tty_set_operations(struct tty_driver *driver,
driver->ops = op;
}
-/**
- * DOC: TTY Driver Flags
- *
- * TTY_DRIVER_RESET_TERMIOS
- * Requests the tty layer to reset the termios setting when the last
- * process has closed the device. Used for PTYs, in particular.
- *
- * TTY_DRIVER_REAL_RAW
- * Indicates that the driver will guarantee not to set any special
- * character handling flags if this is set for the tty:
- *
- * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)``
- *
- * That is, if there is no reason for the driver to
- * send notifications of parity and break characters up to the line
- * driver, it won't do so. This allows the line driver to optimize for
- * this case if this flag is set. (Note that there is also a promise, if
- * the above case is true, not to signal overruns, either.)
- *
- * TTY_DRIVER_DYNAMIC_DEV
- * The individual tty devices need to be registered with a call to
- * tty_register_device() when the device is found in the system and
- * unregistered with a call to tty_unregister_device() so the devices will
- * be show up properly in sysfs. If not set, all &tty_driver.num entries
- * will be created by the tty core in sysfs when tty_register_driver() is
- * called. This is to be used by drivers that have tty devices that can
- * appear and disappear while the main tty driver is registered with the
- * tty core.
- *
- * TTY_DRIVER_DEVPTS_MEM
- * Don't use the standard arrays (&tty_driver.ttys and
- * &tty_driver.termios), instead use dynamic memory keyed through the
- * devpts filesystem. This is only applicable to the PTY driver.
- *
- * TTY_DRIVER_HARDWARE_BREAK
- * Hardware handles break signals. Pass the requested timeout to the
- * &tty_operations.break_ctl instead of using a simple on/off interface.
- *
- * TTY_DRIVER_DYNAMIC_ALLOC
- * Do not allocate structures which are needed per line for this driver
- * (&tty_driver.ports) as it would waste memory. The driver will take
- * care. This is only applicable to the PTY driver.
- *
- * TTY_DRIVER_UNNUMBERED_NODE
- * Do not create numbered ``/dev`` nodes. For example, create
- * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a
- * driver for a single tty device is being allocated.
- */
-#define TTY_DRIVER_INSTALLED 0x0001
-#define TTY_DRIVER_RESET_TERMIOS 0x0002
-#define TTY_DRIVER_REAL_RAW 0x0004
-#define TTY_DRIVER_DYNAMIC_DEV 0x0008
-#define TTY_DRIVER_DEVPTS_MEM 0x0010
-#define TTY_DRIVER_HARDWARE_BREAK 0x0020
-#define TTY_DRIVER_DYNAMIC_ALLOC 0x0040
-#define TTY_DRIVER_UNNUMBERED_NODE 0x0080
-
-/* tty driver types */
-#define TTY_DRIVER_TYPE_SYSTEM 0x0001
-#define TTY_DRIVER_TYPE_CONSOLE 0x0002
-#define TTY_DRIVER_TYPE_SERIAL 0x0003
-#define TTY_DRIVER_TYPE_PTY 0x0004
-#define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */
-#define TTY_DRIVER_TYPE_SYSCONS 0x0006
-
-/* system subtypes (magic, used by tty_io.c) */
-#define SYSTEM_TYPE_TTY 0x0001
-#define SYSTEM_TYPE_CONSOLE 0x0002
-#define SYSTEM_TYPE_SYSCONS 0x0003
-#define SYSTEM_TYPE_SYSPTMX 0x0004
-
-/* pty subtypes (magic, used by tty_io.c) */
-#define PTY_TYPE_MASTER 0x0001
-#define PTY_TYPE_SLAVE 0x0002
-
-/* serial subtype definitions */
-#define SERIAL_TYPE_NORMAL 1
-
int tty_register_driver(struct tty_driver *driver);
void tty_unregister_driver(struct tty_driver *driver);
struct device *tty_register_device(struct tty_driver *driver, unsigned index,
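To make the new enums concrete, a minimal allocation sketch (the driver name, line count and ops are hypothetical; error unwinding trimmed):

static struct tty_driver *demo_driver;

static int __init demo_tty_init(void)
{
	demo_driver = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(demo_driver))
		return PTR_ERR(demo_driver);

	demo_driver->driver_name = "demo";
	demo_driver->name = "ttyDEMO";
	demo_driver->type = TTY_DRIVER_TYPE_SERIAL;	/* enum tty_driver_type */
	demo_driver->subtype = SERIAL_TYPE_NORMAL;	/* enum tty_driver_subtype */
	demo_driver->init_termios = tty_std_termios;
	tty_set_operations(demo_driver, &demo_tty_ops);	/* hypothetical ops */

	return tty_register_driver(demo_driver);
}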
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index af01e89074b2..c5cccc3fc1e8 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -39,7 +39,6 @@ do { \
int ldsem_down_read(struct ld_semaphore *sem, long timeout);
int ldsem_down_read_trylock(struct ld_semaphore *sem);
int ldsem_down_write(struct ld_semaphore *sem, long timeout);
-int ldsem_down_write_trylock(struct ld_semaphore *sem);
void ldsem_up_read(struct ld_semaphore *sem);
void ldsem_up_write(struct ld_semaphore *sem);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index cfa8005e24f9..b46738701f8d 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -51,6 +51,7 @@ struct ep_device;
* @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
* @ss_ep_comp: SuperSpeed companion descriptor for this endpoint
* @ssp_isoc_ep_comp: SuperSpeedPlus isoc companion descriptor for this endpoint
+ * @eusb2_isoc_ep_comp: eUSB2 isoc companion descriptor for this endpoint
* @urb_list: urbs queued to this endpoint; maintained by usbcore
* @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
* with one or more transfer descriptors (TDs) per urb
@@ -64,9 +65,10 @@ struct ep_device;
* descriptor within an active interface in a given USB configuration.
*/
struct usb_host_endpoint {
- struct usb_endpoint_descriptor desc;
- struct usb_ss_ep_comp_descriptor ss_ep_comp;
- struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp;
+ struct usb_endpoint_descriptor desc;
+ struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp;
+ struct usb_eusb2_isoc_ep_comp_descriptor eusb2_isoc_ep_comp;
struct list_head urb_list;
void *hcpriv;
struct ep_device *ep_dev; /* For sysfs info */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 3963e55e88a3..fbdef950f06c 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -61,7 +61,7 @@ struct musb_hdrc_eps_bits {
};
struct musb_hdrc_config {
- struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */
+ const struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */
unsigned fifo_cfg_size; /* size of the fifo configuration */
/* MUSB configuration-specific details */
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index 5050f502c1ed..4b651065738a 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -49,19 +49,10 @@
/*-------------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_USB_ULPI)
-struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
- unsigned int flags);
-
struct usb_phy *devm_otg_ulpi_create(struct device *dev,
struct usb_phy_io_ops *ops,
unsigned int flags);
#else
-static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
- unsigned int flags)
-{
- return NULL;
-}
-
static inline struct usb_phy *devm_otg_ulpi_create(struct device *dev,
struct usb_phy_io_ops *ops,
unsigned int flags)
diff --git a/include/misc/cxl-base.h b/include/misc/cxl-base.h
deleted file mode 100644
index 2251da7f32d9..000000000000
--- a/include/misc/cxl-base.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2014 IBM Corp.
- */
-
-#ifndef _MISC_CXL_BASE_H
-#define _MISC_CXL_BASE_H
-
-#ifdef CONFIG_CXL_BASE
-
-#define CXL_IRQ_RANGES 4
-
-struct cxl_irq_ranges {
- irq_hw_number_t offset[CXL_IRQ_RANGES];
- irq_hw_number_t range[CXL_IRQ_RANGES];
-};
-
-extern atomic_t cxl_use_count;
-
-static inline bool cxl_ctx_in_use(void)
-{
- return (atomic_read(&cxl_use_count) != 0);
-}
-
-static inline void cxl_ctx_get(void)
-{
- atomic_inc(&cxl_use_count);
-}
-
-static inline void cxl_ctx_put(void)
-{
- atomic_dec(&cxl_use_count);
-}
-
-struct cxl_afu *cxl_afu_get(struct cxl_afu *afu);
-void cxl_afu_put(struct cxl_afu *afu);
-void cxl_slbia(struct mm_struct *mm);
-
-#else /* CONFIG_CXL_BASE */
-
-static inline bool cxl_ctx_in_use(void) { return false; }
-static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) { return NULL; }
-static inline void cxl_afu_put(struct cxl_afu *afu) {}
-static inline void cxl_slbia(struct mm_struct *mm) {}
-
-#endif /* CONFIG_CXL_BASE */
-
-#endif
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
deleted file mode 100644
index d8044299d654..000000000000
--- a/include/misc/cxl.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2015 IBM Corp.
- */
-
-#ifndef _MISC_CXL_H
-#define _MISC_CXL_H
-
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/interrupt.h>
-#include <uapi/misc/cxl.h>
-
-/*
- * This documents the in kernel API for driver to use CXL. It allows kernel
- * drivers to bind to AFUs using an AFU configuration record exposed as a PCI
- * configuration record.
- *
- * This API enables control over AFU and contexts which can't be part of the
- * generic PCI API. This API is agnostic to the actual AFU.
- */
-
-/* Get the AFU associated with a pci_dev */
-struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);
-
-/* Get the AFU conf record number associated with a pci_dev */
-unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev);
-
-
-/*
- * Context lifetime overview:
- *
- * An AFU context may be inited and then started and stopped multiple times
- * before it's released. ie.
- * - cxl_dev_context_init()
- * - cxl_start_context()
- * - cxl_stop_context()
- * - cxl_start_context()
- * - cxl_stop_context()
- * ...repeat...
- * - cxl_release_context()
- * Once released, a context can't be started again.
- *
- * One context is inited by the cxl driver for every pci_dev. This is to be
- * used as a default kernel context. cxl_get_context() will get this
- * context. This context will be released by PCI hot unplug, so doesn't need to
- * be released explicitly by drivers.
- *
- * Additional kernel contexts may be inited using cxl_dev_context_init().
- * These must be released using cxl_context_detach().
- *
- * Once a context has been inited, IRQs may be configured. Firstly these IRQs
- * must be allocated (cxl_allocate_afu_irqs()), then individually mapped to
- * specific handlers (cxl_map_afu_irq()).
- *
- * These IRQs can be unmapped (cxl_unmap_afu_irq()) and finally released
- * (cxl_free_afu_irqs()).
- *
- * The AFU can be reset (cxl_afu_reset()). This will cause the PSL/AFU
- * hardware to lose track of all contexts. It's upto the caller of
- * cxl_afu_reset() to restart these contexts.
- */
-
-/*
- * On pci_enabled_device(), the cxl driver will init a single cxl context for
- * use by the driver. It doesn't start this context (as that will likely
- * generate DMA traffic for most AFUs).
- *
- * This gets the default context associated with this pci_dev. This context
- * doesn't need to be released as this will be done by the PCI subsystem on hot
- * unplug.
- */
-struct cxl_context *cxl_get_context(struct pci_dev *dev);
-/*
- * Allocate and initalise a context associated with a AFU PCI device. This
- * doesn't start the context in the AFU.
- */
-struct cxl_context *cxl_dev_context_init(struct pci_dev *dev);
-/*
- * Release and free a context. Context should be stopped before calling.
- */
-int cxl_release_context(struct cxl_context *ctx);
-
-/*
- * Set and get private data associated with a context. Allows drivers to have a
- * back pointer to some useful structure.
- */
-int cxl_set_priv(struct cxl_context *ctx, void *priv);
-void *cxl_get_priv(struct cxl_context *ctx);
-
-/*
- * Allocate AFU interrupts for this context. num=0 will allocate the default
- * for this AFU as given in the AFU descriptor. This number doesn't include the
- * interrupt 0 (CAIA defines AFU IRQ 0 for page faults). Each interrupt to be
- * used must map a handler with cxl_map_afu_irq.
- */
-int cxl_allocate_afu_irqs(struct cxl_context *cxl, int num);
-/* Free allocated interrupts */
-void cxl_free_afu_irqs(struct cxl_context *cxl);
-
-/*
- * Map a handler for an AFU interrupt associated with a particular context. AFU
- * IRQS numbers start from 1 (CAIA defines AFU IRQ 0 for page faults). cookie
- * is private data is that will be provided to the interrupt handler.
- */
-int cxl_map_afu_irq(struct cxl_context *cxl, int num,
- irq_handler_t handler, void *cookie, char *name);
-/* unmap mapped IRQ handlers */
-void cxl_unmap_afu_irq(struct cxl_context *cxl, int num, void *cookie);
-
-/*
- * Start work on the AFU. This starts an cxl context and associates it with a
- * task. task == NULL will make it a kernel context.
- */
-int cxl_start_context(struct cxl_context *ctx, u64 wed,
- struct task_struct *task);
-/*
- * Stop a context and remove it from the PSL
- */
-int cxl_stop_context(struct cxl_context *ctx);
-
-/* Reset the AFU */
-int cxl_afu_reset(struct cxl_context *ctx);
-
-/*
- * Set a context as a master context.
- * This sets the default problem space area mapped as the full space, rather
- * than just the per context area (for slaves).
- */
-void cxl_set_master(struct cxl_context *ctx);
-
-/*
- * Map and unmap the AFU Problem Space area. The amount and location mapped
- * depends on if this context is a master or slave.
- */
-void __iomem *cxl_psa_map(struct cxl_context *ctx);
-void cxl_psa_unmap(void __iomem *addr);
-
-/* Get the process element for this context */
-int cxl_process_element(struct cxl_context *ctx);
-
-/*
- * These calls allow drivers to create their own file descriptors and make them
- * identical to the cxl file descriptor user API. An example use case:
- *
- * struct file_operations cxl_my_fops = {};
- * ......
- * // Init the context
- * ctx = cxl_dev_context_init(dev);
- * if (IS_ERR(ctx))
- * return PTR_ERR(ctx);
- * // Create and attach a new file descriptor to my file ops
- * file = cxl_get_fd(ctx, &cxl_my_fops, &fd);
- * // Start context
- * rc = cxl_start_work(ctx, &work.work);
- * if (rc) {
- * fput(file);
- * put_unused_fd(fd);
- * return -ENODEV;
- * }
- * // No error paths after installing the fd
- * fd_install(fd, file);
- * return fd;
- *
- * This inits a context, and gets a file descriptor and associates some file
- * ops to that file descriptor. If the file ops are blank, the cxl driver will
- * fill them in with the default ones that mimic the standard user API. Once
- * completed, the file descriptor can be installed. Once the file descriptor is
- * installed, it's visible to the user so no errors must occur past this point.
- *
- * If cxl_fd_release() file op call is installed, the context will be stopped
- * and released when the fd is released. Hence the driver won't need to manage
- * this itself.
- */
-
-/*
- * Take a context and associate it with my file ops. Returns the associated
- * file and file descriptor. Any file ops which are blank are filled in by the
- * cxl driver with the default ops to mimic the standard API.
- */
-struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
- int *fd);
-/* Get the context associated with this file */
-struct cxl_context *cxl_fops_get_context(struct file *file);
-/*
- * Start a context associated a struct cxl_ioctl_start_work used by the
- * standard cxl user API.
- */
-int cxl_start_work(struct cxl_context *ctx,
- struct cxl_ioctl_start_work *work);
-/*
- * Export all the existing fops so drivers can use them
- */
-int cxl_fd_open(struct inode *inode, struct file *file);
-int cxl_fd_release(struct inode *inode, struct file *file);
-long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
-__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
-ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
- loff_t *off);
-
-/*
- * For EEH, a driver may want to assert a PERST will reload the same image
- * from flash into the FPGA.
- *
- * This is a property of the entire adapter, not a single AFU, so drivers
- * should set this property with care!
- */
-void cxl_perst_reloads_same_image(struct cxl_afu *afu,
- bool perst_reloads_same_image);
-
-/*
- * Read the VPD for the card where the AFU resides
- */
-ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count);
-
-/*
- * AFU driver ops allow an AFU driver to create their own events to pass to
- * userspace through the file descriptor as a simpler alternative to overriding
- * the read() and poll() calls that works with the generic cxl events. These
- * events are given priority over the generic cxl events, so they will be
- * delivered first if multiple types of events are pending.
- *
- * The AFU driver must call cxl_context_events_pending() to notify the cxl
- * driver that new events are ready to be delivered for a specific context.
- * cxl_context_events_pending() will adjust the current count of AFU driver
- * events for this context, and wake up anyone waiting on the context wait
- * queue.
- *
- * The cxl driver will then call fetch_event() to get a structure defining
- * the size and address of the driver specific event data. The cxl driver
- * will build a cxl header with type and process_element fields filled in,
- * and header.size set to sizeof(struct cxl_event_header) + data_size.
- * The total size of the event is limited to CXL_READ_MIN_SIZE (4K).
- *
- * fetch_event() is called with a spin lock held, so it must not sleep.
- *
- * The cxl driver will then deliver the event to userspace, and finally
- * call event_delivered() to return the status of the operation, identified
- * by cxl context and AFU driver event data pointers.
- * 0 Success
- * -EFAULT copy_to_user() has failed
- * -EINVAL Event data pointer is NULL, or event size is greater than
- * CXL_READ_MIN_SIZE.
- */
-struct cxl_afu_driver_ops {
- struct cxl_event_afu_driver_reserved *(*fetch_event) (
- struct cxl_context *ctx);
- void (*event_delivered) (struct cxl_context *ctx,
- struct cxl_event_afu_driver_reserved *event,
- int rc);
-};
-
-/*
- * Associate the above driver ops with a specific context.
- * Reset the current count of AFU driver events.
- */
-void cxl_set_driver_ops(struct cxl_context *ctx,
- struct cxl_afu_driver_ops *ops);
-
-/* Notify cxl driver that new events are ready to be delivered for context */
-void cxl_context_events_pending(struct cxl_context *ctx,
- unsigned int new_events);
-
-#endif /* _MISC_CXL_H */
diff --git a/include/misc/cxllib.h b/include/misc/cxllib.h
deleted file mode 100644
index eacc417288fc..000000000000
--- a/include/misc/cxllib.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2017 IBM Corp.
- */
-
-#ifndef _MISC_CXLLIB_H
-#define _MISC_CXLLIB_H
-
-#include <linux/pci.h>
-#include <asm/reg.h>
-
-/*
- * cxl driver exports a in-kernel 'library' API which can be called by
- * other drivers to help interacting with an IBM XSL.
- */
-
-/*
- * tells whether capi is supported on the PCIe slot where the
- * device is seated
- *
- * Input:
- * dev: device whose slot needs to be checked
- * flags: 0 for the time being
- */
-bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags);
-
-
-/*
- * Returns the configuration parameters to be used by the XSL or device
- *
- * Input:
- * dev: device, used to find PHB
- * Output:
- * struct cxllib_xsl_config:
- * version
- * capi BAR address, i.e. 0x2000000000000-0x2FFFFFFFFFFFF
- * capi BAR size
- * data send control (XSL_DSNCTL)
- * dummy read address (XSL_DRA)
- */
-#define CXL_XSL_CONFIG_VERSION1 1
-struct cxllib_xsl_config {
- u32 version; /* format version for register encoding */
- u32 log_bar_size;/* log size of the capi_window */
- u64 bar_addr; /* address of the start of capi window */
- u64 dsnctl; /* matches definition of XSL_DSNCTL */
- u64 dra; /* real address that can be used for dummy read */
-};
-
-int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg);
-
-
-/*
- * Activate capi for the pci host bridge associated with the device.
- * Can be extended to deactivate once we know how to do it.
- * Device must be ready to accept messages from the CAPP unit and
- * respond accordingly (TLB invalidates, ...)
- *
- * PHB is switched to capi mode through calls to skiboot.
- * CAPP snooping is activated
- *
- * Input:
- * dev: device whose PHB should switch mode
- * mode: mode to switch to i.e. CAPI or PCI
- * flags: options related to the mode
- */
-enum cxllib_mode {
- CXL_MODE_CXL,
- CXL_MODE_PCI,
-};
-
-#define CXL_MODE_NO_DMA 0
-#define CXL_MODE_DMA_TVT0 1
-#define CXL_MODE_DMA_TVT1 2
-
-int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode,
- unsigned long flags);
-
-
-/*
- * Set the device for capi DMA.
- * Define its dma_ops and dma offset so that allocations will be using TVT#1
- *
- * Input:
- * dev: device to set
- * flags: options. CXL_MODE_DMA_TVT1 should be used
- */
-int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags);
-
-
-/*
- * Get the Process Element structure for the given thread
- *
- * Input:
- * task: task_struct for the context of the translation
- * translation_mode: whether addresses should be translated
- * Output:
- * attr: attributes to fill up the Process Element structure from CAIA
- */
-struct cxllib_pe_attributes {
- u64 sr;
- u32 lpid;
- u32 tid;
- u32 pid;
-};
-#define CXL_TRANSLATED_MODE 0
-#define CXL_REAL_MODE 1
-
-int cxllib_get_PE_attributes(struct task_struct *task,
- unsigned long translation_mode, struct cxllib_pe_attributes *attr);
-
-
-/*
- * Handle memory fault.
- * Fault in all the pages of the specified buffer for the permissions
- * provided in ‘flags’
- *
- * Shouldn't be called from interrupt context
- *
- * Input:
- * mm: struct mm for the thread faulting the pages
- * addr: base address of the buffer to page in
- * size: size of the buffer to page in
- * flags: permission requested (DSISR_ISSTORE...)
- */
-int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags);
-
-
-#endif /* _MISC_CXLLIB_H */
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 851841336ee6..5d331383047b 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -343,6 +343,7 @@ TRACE_EVENT(rpc_request,
{ RPC_TASK_MOVEABLE, "MOVEABLE" }, \
{ RPC_TASK_NULLCREDS, "NULLCREDS" }, \
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
+ { RPC_TASK_NETUNREACH_FATAL, "NETUNREACH_FATAL"}, \
{ RPC_TASK_DYNAMIC, "DYNAMIC" }, \
{ RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \
{ RPC_TASK_SOFT, "SOFT" }, \
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 5e0eb41d967e..5ec43ecbceb7 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -229,6 +229,9 @@
* - FUSE_URING_IN_OUT_HEADER_SZ
* - FUSE_URING_OP_IN_OUT_SZ
* - enum fuse_uring_cmd
+ *
+ * 7.43
+ * - add FUSE_REQUEST_TIMEOUT
*/
#ifndef _LINUX_FUSE_H
@@ -264,7 +267,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 42
+#define FUSE_KERNEL_MINOR_VERSION 43
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -435,6 +438,8 @@ struct fuse_file_lock {
* of the request ID indicates resend requests
* FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
* FUSE_OVER_IO_URING: Indicate that client supports io-uring
+ * FUSE_REQUEST_TIMEOUT: kernel supports timing out requests.
+ * init_out.request_timeout contains the timeout (in secs)
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -477,11 +482,11 @@ struct fuse_file_lock {
#define FUSE_PASSTHROUGH (1ULL << 37)
#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38)
#define FUSE_HAS_RESEND (1ULL << 39)
-
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
#define FUSE_ALLOW_IDMAP (1ULL << 40)
#define FUSE_OVER_IO_URING (1ULL << 41)
+#define FUSE_REQUEST_TIMEOUT (1ULL << 42)
/**
* CUSE INIT request/reply flags
@@ -909,7 +914,8 @@ struct fuse_init_out {
uint16_t map_alignment;
uint32_t flags2;
uint32_t max_stack_depth;
- uint32_t unused[6];
+ uint16_t request_timeout;
+ uint16_t unused[11];
};
#define CUSE_INIT_INFO_MAX 4096
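As a rough illustration of how the new negotiation bit could be consumed, the sketch below assumes a userspace FUSE server that has already folded init_out.flags and init_out.flags2 into a single 64-bit value (flags64 here is that assumed combination, not a field from the patch); request_timeout is the new 16-bit field added for 7.43.

#include <stdint.h>
#include <stdio.h>

/* Same bit value as the uapi addition above; duplicated for a standalone sketch. */
#define FUSE_REQUEST_TIMEOUT (1ULL << 42)

/*
 * Minimal sketch, not from the patch: report whether the kernel
 * advertised request timeouts during FUSE_INIT.
 */
static void report_request_timeout(uint64_t flags64, uint16_t request_timeout)
{
	if (flags64 & FUSE_REQUEST_TIMEOUT)
		printf("kernel may time out requests after %u seconds\n",
		       request_timeout);
	else
		printf("kernel does not time out requests\n");
}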
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index 7255b36b5cf6..583b86681c93 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -410,6 +410,29 @@ struct ublk_param_dma_align {
__u8 pad[4];
};
+#define UBLK_MIN_SEGMENT_SIZE 4096
+/*
+ * If any one of the three segment parameters is set to 0, the behavior is
+ * undefined.
+ */
+struct ublk_param_segment {
+ /*
+ * seg_boundary_mask + 1 must be a power of 2, and that value also has
+ * to be >= UBLK_MIN_SEGMENT_SIZE (4096)
+ */
+ __u64 seg_boundary_mask;
+
+ /*
+ * max_segment_size may be overridden by virt_boundary_mask, so be
+ * careful when setting both.
+ *
+ * max_segment_size has to be >= UBLK_MIN_SEGMENT_SIZE(4096)
+ */
+ __u32 max_segment_size;
+ __u16 max_segments;
+ __u8 pad[2];
+};
+
struct ublk_params {
/*
* Total length of parameters, userspace has to set 'len' for both
@@ -423,6 +446,7 @@ struct ublk_params {
#define UBLK_PARAM_TYPE_DEVT (1 << 2)
#define UBLK_PARAM_TYPE_ZONED (1 << 3)
#define UBLK_PARAM_TYPE_DMA_ALIGN (1 << 4)
+#define UBLK_PARAM_TYPE_SEGMENT (1 << 5)
__u32 types; /* types of parameter included */
struct ublk_param_basic basic;
@@ -430,6 +454,7 @@ struct ublk_params {
struct ublk_param_devt devt;
struct ublk_param_zoned zoned;
struct ublk_param_dma_align dma;
+ struct ublk_param_segment seg;
};
#endif
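For orientation only, here is a minimal sketch (not part of the patch) of a ublk server filling in the new segment limits so that they satisfy the constraints documented above; it assumes the updated uapi header is installed, and the specific values are just examples.

#include <linux/ublk_cmd.h>

/*
 * Minimal sketch: choose segment limits that respect the documented
 * rules -- seg_boundary_mask + 1 is a power of two and >= 4096,
 * max_segment_size >= UBLK_MIN_SEGMENT_SIZE, and no field is left 0.
 */
static void ublk_fill_segment_params(struct ublk_params *p)
{
	p->seg.seg_boundary_mask = 0xffffffffULL;  /* 4 GiB boundary */
	p->seg.max_segment_size  = 65536;          /* >= UBLK_MIN_SEGMENT_SIZE */
	p->seg.max_segments      = 128;            /* arbitrary example value */
	p->types |= UBLK_PARAM_TYPE_SEGMENT;
}

The structure would then be submitted through the driver's usual set-params command together with any other parameter types.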
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 052290652046..8003243a4937 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -253,6 +253,9 @@ struct usb_ctrlrequest {
#define USB_DT_BOS 0x0f
#define USB_DT_DEVICE_CAPABILITY 0x10
#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11
+/* From the eUSB2 spec */
+#define USB_DT_EUSB2_ISOC_ENDPOINT_COMP 0x12
+/* From Wireless USB spec */
#define USB_DT_WIRE_ADAPTER 0x21
/* From USB Device Firmware Upgrade Specification, Revision 1.1 */
#define USB_DT_DFU_FUNCTIONAL 0x21
@@ -676,6 +679,18 @@ static inline int usb_endpoint_interrupt_type(
/*-------------------------------------------------------------------------*/
+/* USB_DT_EUSB2_ISOC_ENDPOINT_COMP: eUSB2 Isoch Endpoint Companion descriptor */
+struct usb_eusb2_isoc_ep_comp_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __le16 wMaxPacketSize;
+ __le32 dwBytesPerInterval;
+} __attribute__ ((packed));
+
+#define USB_DT_EUSB2_ISOC_EP_COMP_SIZE 8
+
+/*-------------------------------------------------------------------------*/
+
/* USB_DT_SSP_ISOC_ENDPOINT_COMP: SuperSpeedPlus Isochronous Endpoint Companion
* descriptor
*/
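As a hedged sketch (not taken from the patch), the helper below shows one way a driver might locate the new eUSB2 isochronous endpoint companion descriptor in a raw descriptor buffer; the multi-byte fields stay little-endian, so wMaxPacketSize and dwBytesPerInterval would still be read with le16_to_cpu()/le32_to_cpu().

#include <linux/types.h>
#include <linux/usb/ch9.h>

/*
 * Minimal sketch: walk a descriptor blob and return the first eUSB2
 * isoc endpoint companion descriptor, or NULL if none is present.
 */
static const struct usb_eusb2_isoc_ep_comp_descriptor *
find_eusb2_isoc_comp(const u8 *buf, unsigned int len)
{
	while (len >= 2 && buf[0] >= 2 && buf[0] <= len) {
		if (buf[1] == USB_DT_EUSB2_ISOC_ENDPOINT_COMP &&
		    buf[0] >= USB_DT_EUSB2_ISOC_EP_COMP_SIZE)
			return (const void *)buf;
		len -= buf[0];
		buf += buf[0];
	}
	return NULL;
}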
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
deleted file mode 100644
index 56376d3907d8..000000000000
--- a/include/uapi/misc/cxl.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * Copyright 2014 IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _UAPI_MISC_CXL_H
-#define _UAPI_MISC_CXL_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-
-struct cxl_ioctl_start_work {
- __u64 flags;
- __u64 work_element_descriptor;
- __u64 amr;
- __s16 num_interrupts;
- __u16 tid;
- __s32 reserved1;
- __u64 reserved2;
- __u64 reserved3;
- __u64 reserved4;
- __u64 reserved5;
-};
-
-#define CXL_START_WORK_AMR 0x0000000000000001ULL
-#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
-#define CXL_START_WORK_ERR_FF 0x0000000000000004ULL
-#define CXL_START_WORK_TID 0x0000000000000008ULL
-#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
- CXL_START_WORK_NUM_IRQS |\
- CXL_START_WORK_ERR_FF |\
- CXL_START_WORK_TID)
-
-
-/* Possible modes that an afu can be in */
-#define CXL_MODE_DEDICATED 0x1
-#define CXL_MODE_DIRECTED 0x2
-
-/* possible flags for the cxl_afu_id flags field */
-#define CXL_AFUID_FLAG_SLAVE 0x1 /* In directed-mode afu is in slave mode */
-
-struct cxl_afu_id {
- __u64 flags; /* One of CXL_AFUID_FLAG_X */
- __u32 card_id;
- __u32 afu_offset;
- __u32 afu_mode; /* one of the CXL_MODE_X */
- __u32 reserved1;
- __u64 reserved2;
- __u64 reserved3;
- __u64 reserved4;
- __u64 reserved5;
- __u64 reserved6;
-};
-
-/* base adapter image header is included in the image */
-#define CXL_AI_NEED_HEADER 0x0000000000000001ULL
-#define CXL_AI_ALL CXL_AI_NEED_HEADER
-
-#define CXL_AI_HEADER_SIZE 128
-#define CXL_AI_BUFFER_SIZE 4096
-#define CXL_AI_MAX_ENTRIES 256
-#define CXL_AI_MAX_CHUNK_SIZE (CXL_AI_BUFFER_SIZE * CXL_AI_MAX_ENTRIES)
-
-struct cxl_adapter_image {
- __u64 flags;
- __u64 data;
- __u64 len_data;
- __u64 len_image;
- __u64 reserved1;
- __u64 reserved2;
- __u64 reserved3;
- __u64 reserved4;
-};
-
-/* ioctl numbers */
-#define CXL_MAGIC 0xCA
-/* AFU devices */
-#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
-#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
-#define CXL_IOCTL_GET_AFU_ID _IOR(CXL_MAGIC, 0x02, struct cxl_afu_id)
-/* adapter devices */
-#define CXL_IOCTL_DOWNLOAD_IMAGE _IOW(CXL_MAGIC, 0x0A, struct cxl_adapter_image)
-#define CXL_IOCTL_VALIDATE_IMAGE _IOW(CXL_MAGIC, 0x0B, struct cxl_adapter_image)
-
-#define CXL_READ_MIN_SIZE 0x1000 /* 4K */
-
-/* Events from read() */
-enum cxl_event_type {
- CXL_EVENT_RESERVED = 0,
- CXL_EVENT_AFU_INTERRUPT = 1,
- CXL_EVENT_DATA_STORAGE = 2,
- CXL_EVENT_AFU_ERROR = 3,
- CXL_EVENT_AFU_DRIVER = 4,
-};
-
-struct cxl_event_header {
- __u16 type;
- __u16 size;
- __u16 process_element;
- __u16 reserved1;
-};
-
-struct cxl_event_afu_interrupt {
- __u16 flags;
- __u16 irq; /* Raised AFU interrupt number */
- __u32 reserved1;
-};
-
-struct cxl_event_data_storage {
- __u16 flags;
- __u16 reserved1;
- __u32 reserved2;
- __u64 addr;
- __u64 dsisr;
- __u64 reserved3;
-};
-
-struct cxl_event_afu_error {
- __u16 flags;
- __u16 reserved1;
- __u32 reserved2;
- __u64 error;
-};
-
-struct cxl_event_afu_driver_reserved {
- /*
- * Defines the buffer passed to the cxl driver by the AFU driver.
- *
- * This is not ABI since the event header.size passed to the user for
- * existing events is set in the read call to sizeof(cxl_event_header)
- * + sizeof(whatever event is being dispatched) and the user is already
- * required to use a 4K buffer on the read call.
- *
- * Of course the contents will be ABI, but that's up the AFU driver.
- */
- __u32 data_size;
- __u8 data[];
-};
-
-struct cxl_event {
- struct cxl_event_header header;
- union {
- struct cxl_event_afu_interrupt irq;
- struct cxl_event_data_storage fault;
- struct cxl_event_afu_error afu_error;
- struct cxl_event_afu_driver_reserved afu_driver_event;
- };
-};
-
-#endif /* _UAPI_MISC_CXL_H */
diff --git a/init/Kconfig b/init/Kconfig
index 681f38ee68db..ede5a43029a9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -132,6 +132,11 @@ config CC_HAS_COUNTED_BY
config CC_HAS_MULTIDIMENSIONAL_NONSTRING
def_bool $(success,echo 'char tag[][4] __attribute__((__nonstring__)) = { };' | $(CC) $(CLANG_FLAGS) -x c - -c -o /dev/null -Werror)
+config LD_CAN_USE_KEEP_IN_OVERLAY
+ # ld.lld prior to 21.0.0 did not support KEEP within an overlay description
+ # https://github.com/llvm/llvm-project/pull/130661
+ def_bool LD_IS_BFD || LLD_VERSION >= 210000
+
config RUSTC_HAS_COERCE_POINTEE
def_bool RUSTC_VERSION >= 108400
@@ -1888,6 +1893,28 @@ config ARCH_HAS_MEMBARRIER_CALLBACKS
config ARCH_HAS_MEMBARRIER_SYNC_CORE
bool
+config ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
+ bool
+ help
+ Control MSEAL_SYSTEM_MAPPINGS access based on architecture.
+
+ A 64-bit kernel is required for the memory sealing feature.
+ No specific hardware features from the CPU are needed.
+
+ To enable this feature, the architecture needs to update its
+ special mapping calls to include the sealing flag and confirm
+ that it doesn't unmap/remap system mappings during the
+ lifetime of the process. The existence of this flag for an architecture
+ implies that it does not require the remapping of the system
+ mappings during process lifetime, so sealing these mappings is safe
+ from a kernel perspective.
+
+ After the architecture enables this, a distribution can set
+ CONFIG_MSEAL_SYSTEM_MAPPINGS to manage access to the feature.
+
+ For complete descriptions of memory sealing, please see
+ Documentation/userspace-api/mseal.rst
+
config HAVE_PERF_EVENTS
bool
help
diff --git a/io_uring/Kconfig b/io_uring/Kconfig
index 9e2a4beba1ef..4b949c42c0bf 100644
--- a/io_uring/Kconfig
+++ b/io_uring/Kconfig
@@ -5,6 +5,7 @@
config IO_URING_ZCRX
def_bool y
+ depends on IO_URING
depends on PAGE_POOL
depends on INET
depends on NET_RX_BUSY_POLL
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3ba49c628337..c6209fe44cb1 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1141,10 +1141,9 @@ void tctx_task_work(struct callback_head *cb)
WARN_ON_ONCE(ret);
}
-static inline void io_req_local_work_add(struct io_kiocb *req,
- struct io_ring_ctx *ctx,
- unsigned flags)
+static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
+ struct io_ring_ctx *ctx = req->ctx;
unsigned nr_wait, nr_tw, nr_tw_prev;
struct llist_node *head;
@@ -1239,17 +1238,16 @@ static void io_req_normal_work_add(struct io_kiocb *req)
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
- io_req_local_work_add(req, req->ctx, flags);
+ io_req_local_work_add(req, flags);
else
io_req_normal_work_add(req);
}
-void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
- unsigned flags)
+void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags)
{
- if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
+ if (WARN_ON_ONCE(!(req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
return;
- io_req_local_work_add(req, ctx, flags);
+ __io_req_task_work_add(req, flags);
}
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
@@ -1645,6 +1643,8 @@ io_req_flags_t io_file_get_flags(struct file *file)
{
io_req_flags_t res = 0;
+ BUILD_BUG_ON(REQ_F_ISREG_BIT != REQ_F_SUPPORT_NOWAIT_BIT + 1);
+
if (S_ISREG(file_inode(file)->i_mode))
res |= REQ_F_ISREG;
if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
@@ -1796,7 +1796,7 @@ struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_kiocb *nxt = NULL;
- if (req_ref_put_and_test(req)) {
+ if (req_ref_put_and_test_atomic(req)) {
if (req->flags & IO_REQ_LINK_FLAGS)
nxt = io_req_find_next(req);
io_free_req(req);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 87f883130286..e4050b2d0821 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -89,8 +89,7 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
unsigned issue_flags);
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
-void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
- unsigned flags);
+void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 0bbcbbcdebfd..50a958e9c921 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -38,8 +38,8 @@ static void io_double_unlock_ctx(struct io_ring_ctx *octx)
mutex_unlock(&octx->uring_lock);
}
-static int io_double_lock_ctx(struct io_ring_ctx *octx,
- unsigned int issue_flags)
+static int io_lock_external_ctx(struct io_ring_ctx *octx,
+ unsigned int issue_flags)
{
/*
* To ensure proper ordering between the two ctxs, we can only
@@ -93,13 +93,14 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
kmem_cache_free(req_cachep, req);
return -EOWNERDEAD;
}
+ req->opcode = IORING_OP_NOP;
req->cqe.user_data = user_data;
io_req_set_res(req, res, cflags);
percpu_ref_get(&ctx->refs);
req->ctx = ctx;
req->tctx = NULL;
req->io_task_work.func = io_msg_tw_complete;
- io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
+ io_req_task_work_add_remote(req, IOU_F_TWQ_LAZY_WAKE);
return 0;
}
@@ -154,7 +155,7 @@ static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
ret = -EOVERFLOW;
if (target_ctx->flags & IORING_SETUP_IOPOLL) {
- if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+ if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
return -EAGAIN;
}
if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
@@ -199,7 +200,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
struct file *src_file = msg->src_file;
int ret;
- if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+ if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
return -EAGAIN;
ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
diff --git a/io_uring/net.c b/io_uring/net.c
index 8944eb679024..24040bc3916a 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -97,6 +97,11 @@ struct io_recvzc {
struct io_zcrx_ifq *ifq;
};
+static int io_sg_from_iter_iovec(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+static int io_sg_from_iter(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -176,16 +181,6 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
return hdr;
}
-/* assign new iovec to kmsg, if we need to */
-static void io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
- struct iovec *iov)
-{
- if (iov) {
- req->flags |= REQ_F_NEED_CLEANUP;
- io_vec_reset_iovec(&kmsg->vec, iov, kmsg->msg.msg_iter.nr_segs);
- }
-}
-
static inline void io_mshot_prep_retry(struct io_kiocb *req,
struct io_async_msghdr *kmsg)
{
@@ -217,7 +212,11 @@ static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg
&iomsg->msg.msg_iter, io_is_compat(req->ctx));
if (unlikely(ret < 0))
return ret;
- io_net_vec_assign(req, iomsg, iov);
+
+ if (iov) {
+ req->flags |= REQ_F_NEED_CLEANUP;
+ io_vec_reset_iovec(&iomsg->vec, iov, iomsg->msg.msg_iter.nr_segs);
+ }
return 0;
}
@@ -325,25 +324,6 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
return 0;
}
-static int io_sendmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct user_msghdr msg;
- int ret;
-
- ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE, NULL);
- if (unlikely(ret))
- return ret;
-
- if (!(req->flags & REQ_F_BUFFER_SELECT))
- ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen,
- ITER_SOURCE);
- /* save msg_control as sys_sendmsg() overwrites it */
- sr->msg_control = iomsg->msg.msg_control_user;
- return ret;
-}
-
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
struct io_async_msghdr *io = req->async_data;
@@ -379,6 +359,8 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
kmsg->msg.msg_name = &kmsg->addr;
kmsg->msg.msg_namelen = addr_len;
}
+ if (sr->flags & IORING_RECVSEND_FIXED_BUF)
+ return 0;
if (!io_do_buffer_select(req)) {
ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
&kmsg->msg.msg_iter);
@@ -392,31 +374,24 @@ static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
-
- sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
-
- return io_sendmsg_copy_hdr(req, kmsg);
-}
-
-static int io_sendmsg_zc_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct io_async_msghdr *kmsg = req->async_data;
struct user_msghdr msg;
int ret;
- if (!(sr->flags & IORING_RECVSEND_FIXED_BUF))
- return io_sendmsg_setup(req, sqe);
-
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
-
ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL);
if (unlikely(ret))
return ret;
+ /* save msg_control as sys_sendmsg() overwrites it */
sr->msg_control = kmsg->msg.msg_control_user;
- kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen;
- return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov, msg.msg_iovlen);
+ if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
+ kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen;
+ return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov,
+ msg.msg_iovlen);
+ }
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ return 0;
+ return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
}
#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
@@ -427,12 +402,6 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sr->done_io = 0;
sr->retry = false;
-
- if (req->opcode != IORING_OP_SEND) {
- if (sqe->addr2 || sqe->file_index)
- return -EINVAL;
- }
-
sr->len = READ_ONCE(sqe->len);
sr->flags = READ_ONCE(sqe->ioprio);
if (sr->flags & ~SENDMSG_FLAGS)
@@ -458,6 +427,8 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -ENOMEM;
if (req->opcode != IORING_OP_SENDMSG)
return io_send_setup(req, sqe);
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
return io_sendmsg_setup(req, sqe);
}
@@ -1302,11 +1273,12 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_ring_ctx *ctx = req->ctx;
+ struct io_async_msghdr *iomsg;
struct io_kiocb *notif;
+ int ret;
zc->done_io = 0;
zc->retry = false;
- req->flags |= REQ_F_POLL_NO_LAZY;
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
return -EINVAL;
@@ -1320,7 +1292,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
notif->cqe.user_data = req->cqe.user_data;
notif->cqe.res = 0;
notif->cqe.flags = IORING_CQE_F_NOTIF;
- req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_NEED_CLEANUP | REQ_F_POLL_NO_LAZY;
zc->flags = READ_ONCE(sqe->ioprio);
if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
@@ -1335,11 +1307,6 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
}
- if (req->opcode != IORING_OP_SEND_ZC) {
- if (unlikely(sqe->addr2 || sqe->file_index))
- return -EINVAL;
- }
-
zc->len = READ_ONCE(sqe->len);
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
req->buf_index = READ_ONCE(sqe->buf_index);
@@ -1349,13 +1316,28 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (io_is_compat(req->ctx))
zc->msg_flags |= MSG_CMSG_COMPAT;
- if (unlikely(!io_msg_alloc_async(req)))
+ iomsg = io_msg_alloc_async(req);
+ if (unlikely(!iomsg))
return -ENOMEM;
+
if (req->opcode == IORING_OP_SEND_ZC) {
- req->flags |= REQ_F_IMPORT_BUFFER;
- return io_send_setup(req, sqe);
+ if (zc->flags & IORING_RECVSEND_FIXED_BUF)
+ req->flags |= REQ_F_IMPORT_BUFFER;
+ ret = io_send_setup(req, sqe);
+ } else {
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
+ ret = io_sendmsg_setup(req, sqe);
+ }
+ if (unlikely(ret))
+ return ret;
+
+ if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) {
+ iomsg->msg.sg_from_iter = io_sg_from_iter_iovec;
+ return io_notif_account_mem(zc->notif, iomsg->msg.msg_iter.count);
}
- return io_sendmsg_zc_setup(req, sqe);
+ iomsg->msg.sg_from_iter = io_sg_from_iter;
+ return 0;
}
static int io_sg_from_iter_iovec(struct sk_buff *skb,
@@ -1412,27 +1394,13 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
- int ret;
- if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
- sr->notif->buf_index = req->buf_index;
- ret = io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
- (u64)(uintptr_t)sr->buf, sr->len,
- ITER_SOURCE, issue_flags);
- if (unlikely(ret))
- return ret;
- kmsg->msg.sg_from_iter = io_sg_from_iter;
- } else {
- ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
- if (unlikely(ret))
- return ret;
- ret = io_notif_account_mem(sr->notif, sr->len);
- if (unlikely(ret))
- return ret;
- kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
- }
+ WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF));
- return ret;
+ sr->notif->buf_index = req->buf_index;
+ return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
+ (u64)(uintptr_t)sr->buf, sr->len,
+ ITER_SOURCE, issue_flags);
}
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
@@ -1513,8 +1481,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
unsigned flags;
int ret, min_ret = 0;
- kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
-
if (req->flags & REQ_F_IMPORT_BUFFER) {
unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
int ret;
@@ -1523,7 +1489,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
&kmsg->vec, uvec_segs, issue_flags);
if (unlikely(ret))
return ret;
- kmsg->msg.sg_from_iter = io_sg_from_iter;
req->flags &= ~REQ_F_IMPORT_BUFFER;
}
diff --git a/io_uring/refs.h b/io_uring/refs.h
index 63982ead9f7d..0d928d87c4ed 100644
--- a/io_uring/refs.h
+++ b/io_uring/refs.h
@@ -17,6 +17,13 @@ static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
return atomic_inc_not_zero(&req->refs);
}
+static inline bool req_ref_put_and_test_atomic(struct io_kiocb *req)
+{
+ WARN_ON_ONCE(!(data_race(req->flags) & REQ_F_REFCOUNT));
+ WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+ return atomic_dec_and_test(&req->refs);
+}
+
static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
if (likely(!(req->flags & REQ_F_REFCOUNT)))
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 3f195e24777e..5e64a8bb30a4 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1002,20 +1002,33 @@ unlock:
}
EXPORT_SYMBOL_GPL(io_buffer_unregister_bvec);
-static int io_import_fixed(int ddir, struct iov_iter *iter,
- struct io_mapped_ubuf *imu,
- u64 buf_addr, size_t len)
+static int validate_fixed_range(u64 buf_addr, size_t len,
+ const struct io_mapped_ubuf *imu)
{
u64 buf_end;
- size_t offset;
- if (WARN_ON_ONCE(!imu))
- return -EFAULT;
if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
return -EFAULT;
/* not inside the mapped region */
if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
return -EFAULT;
+ if (unlikely(len > MAX_RW_COUNT))
+ return -EFAULT;
+ return 0;
+}
+
+static int io_import_fixed(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ u64 buf_addr, size_t len)
+{
+ size_t offset;
+ int ret;
+
+ if (WARN_ON_ONCE(!imu))
+ return -EFAULT;
+ ret = validate_fixed_range(buf_addr, len, imu);
+ if (unlikely(ret))
+ return ret;
if (!(imu->dir & (1 << ddir)))
return -EFAULT;
@@ -1305,12 +1318,12 @@ static int io_vec_fill_bvec(int ddir, struct iov_iter *iter,
u64 buf_addr = (u64)(uintptr_t)iovec[iov_idx].iov_base;
struct bio_vec *src_bvec;
size_t offset;
- u64 buf_end;
+ int ret;
+
+ ret = validate_fixed_range(buf_addr, iov_len, imu);
+ if (unlikely(ret))
+ return ret;
- if (unlikely(check_add_overflow(buf_addr, (u64)iov_len, &buf_end)))
- return -EFAULT;
- if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
- return -EFAULT;
if (unlikely(!iov_len))
return -EFAULT;
if (unlikely(check_add_overflow(total_len, iov_len, &total_len)))
@@ -1349,6 +1362,82 @@ static int io_estimate_bvec_size(struct iovec *iov, unsigned nr_iovs,
return max_segs;
}
+static int io_vec_fill_kern_bvec(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ struct iovec *iovec, unsigned nr_iovs,
+ struct iou_vec *vec)
+{
+ const struct bio_vec *src_bvec = imu->bvec;
+ struct bio_vec *res_bvec = vec->bvec;
+ unsigned res_idx = 0;
+ size_t total_len = 0;
+ unsigned iov_idx;
+
+ for (iov_idx = 0; iov_idx < nr_iovs; iov_idx++) {
+ size_t offset = (size_t)(uintptr_t)iovec[iov_idx].iov_base;
+ size_t iov_len = iovec[iov_idx].iov_len;
+ struct bvec_iter bi = {
+ .bi_size = offset + iov_len,
+ };
+ struct bio_vec bv;
+
+ bvec_iter_advance(src_bvec, &bi, offset);
+ for_each_mp_bvec(bv, src_bvec, bi, bi)
+ res_bvec[res_idx++] = bv;
+ total_len += iov_len;
+ }
+ iov_iter_bvec(iter, ddir, res_bvec, res_idx, total_len);
+ return 0;
+}
+
+static int iov_kern_bvec_size(const struct iovec *iov,
+ const struct io_mapped_ubuf *imu,
+ unsigned int *nr_seg)
+{
+ size_t offset = (size_t)(uintptr_t)iov->iov_base;
+ const struct bio_vec *bvec = imu->bvec;
+ int start = 0, i = 0;
+ size_t off = 0;
+ int ret;
+
+ ret = validate_fixed_range(offset, iov->iov_len, imu);
+ if (unlikely(ret))
+ return ret;
+
+ for (i = 0; off < offset + iov->iov_len && i < imu->nr_bvecs;
+ off += bvec[i].bv_len, i++) {
+ if (offset >= off && offset < off + bvec[i].bv_len)
+ start = i;
+ }
+ *nr_seg = i - start;
+ return 0;
+}
+
+static int io_kern_bvec_size(struct iovec *iov, unsigned nr_iovs,
+ struct io_mapped_ubuf *imu, unsigned *nr_segs)
+{
+ unsigned max_segs = 0;
+ size_t total_len = 0;
+ unsigned i;
+ int ret;
+
+ *nr_segs = 0;
+ for (i = 0; i < nr_iovs; i++) {
+ if (unlikely(!iov[i].iov_len))
+ return -EFAULT;
+ if (unlikely(check_add_overflow(total_len, iov[i].iov_len,
+ &total_len)))
+ return -EOVERFLOW;
+ ret = iov_kern_bvec_size(&iov[i], imu, &max_segs);
+ if (unlikely(ret))
+ return ret;
+ *nr_segs += max_segs;
+ }
+ if (total_len > MAX_RW_COUNT)
+ return -EINVAL;
+ return 0;
+}
+
int io_import_reg_vec(int ddir, struct iov_iter *iter,
struct io_kiocb *req, struct iou_vec *vec,
unsigned nr_iovs, unsigned issue_flags)
@@ -1363,14 +1452,20 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
if (!node)
return -EFAULT;
imu = node->buf;
- if (imu->is_kbuf)
- return -EOPNOTSUPP;
if (!(imu->dir & (1 << ddir)))
return -EFAULT;
iovec_off = vec->nr - nr_iovs;
iov = vec->iovec + iovec_off;
- nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
+
+ if (imu->is_kbuf) {
+ int ret = io_kern_bvec_size(iov, nr_iovs, imu, &nr_segs);
+
+ if (unlikely(ret))
+ return ret;
+ } else {
+ nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
+ }
if (sizeof(struct bio_vec) > sizeof(struct iovec)) {
size_t bvec_bytes;
@@ -1397,6 +1492,9 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
req->flags |= REQ_F_NEED_CLEANUP;
}
+ if (imu->is_kbuf)
+ return io_vec_fill_kern_bvec(ddir, iter, imu, iov, nr_iovs, vec);
+
return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);
}
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index f2cfc371f3d0..a9ea7d29cdd9 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -205,8 +205,8 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
* that it doesn't read in per-op data, play it safe and ensure that
* any SQE data is stable beyond prep. This can later get relaxed.
*/
- memcpy(ac->data.sqes, sqe, uring_sqe_size(req->ctx));
- ioucmd->sqe = ac->data.sqes;
+ memcpy(ac->sqes, sqe, uring_sqe_size(req->ctx));
+ ioucmd->sqe = ac->sqes;
return 0;
}
@@ -307,17 +307,18 @@ static inline int io_uring_cmd_getsockopt(struct socket *sock,
struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
+ const struct io_uring_sqe *sqe = cmd->sqe;
bool compat = !!(issue_flags & IO_URING_F_COMPAT);
int optlen, optname, level, err;
void __user *optval;
- level = READ_ONCE(cmd->sqe->level);
+ level = READ_ONCE(sqe->level);
if (level != SOL_SOCKET)
return -EOPNOTSUPP;
- optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
- optname = READ_ONCE(cmd->sqe->optname);
- optlen = READ_ONCE(cmd->sqe->optlen);
+ optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
+ optname = READ_ONCE(sqe->optname);
+ optlen = READ_ONCE(sqe->optlen);
err = do_sock_getsockopt(sock, compat, level, optname,
USER_SOCKPTR(optval),
@@ -333,15 +334,16 @@ static inline int io_uring_cmd_setsockopt(struct socket *sock,
struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
+ const struct io_uring_sqe *sqe = cmd->sqe;
bool compat = !!(issue_flags & IO_URING_F_COMPAT);
int optname, optlen, level;
void __user *optval;
sockptr_t optval_s;
- optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
- optname = READ_ONCE(cmd->sqe->optname);
- optlen = READ_ONCE(cmd->sqe->optlen);
- level = READ_ONCE(cmd->sqe->level);
+ optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
+ optname = READ_ONCE(sqe->optname);
+ optlen = READ_ONCE(sqe->optlen);
+ level = READ_ONCE(sqe->level);
optval_s = USER_SOCKPTR(optval);
return do_sock_setsockopt(sock, compat, level, optname, optval_s,
diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h
index 14e525255854..b04686b6b5d2 100644
--- a/io_uring/uring_cmd.h
+++ b/io_uring/uring_cmd.h
@@ -6,6 +6,7 @@
struct io_async_cmd {
struct io_uring_cmd_data data;
struct iou_vec vec;
+ struct io_uring_sqe sqes[2];
};
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 9c95b5b6ec4e..80d4a6f71d29 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -818,6 +818,14 @@ io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
int ret = 0;
len = min_t(size_t, len, desc->count);
+ /*
+ * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
+ * if desc->count is already 0. This is caused by the if (offset + 1 !=
+ * skb->len) check. Return early in this case to break out of
+ * __tcp_read_sock().
+ */
+ if (!len)
+ return 0;
if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
return -EAGAIN;
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index ce1bb2301c06..0b9495187fba 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -837,10 +837,6 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
struct kgdb_state kgdb_var;
struct kgdb_state *ks = &kgdb_var;
- int ret = 0;
-
- if (arch_kgdb_ops.enable_nmi)
- arch_kgdb_ops.enable_nmi(0);
/*
* Avoid entering the debugger if we were triggered due to an oops
* but panic_timeout indicates the system should automatically
@@ -858,15 +854,11 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
ks->linux_regs = regs;
if (kgdb_reenter_check(ks))
- goto out; /* Ouch, double exception ! */
+ return 0; /* Ouch, double exception ! */
if (kgdb_info[ks->cpu].enter_kgdb != 0)
- goto out;
+ return 0;
- ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
-out:
- if (arch_kgdb_ops.enable_nmi)
- arch_kgdb_ops.enable_nmi(1);
- return ret;
+ return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
}
NOKPROBE_SYMBOL(kgdb_handle_exception);
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 6a77f1c779c4..9b11b10b120c 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -334,7 +334,7 @@ poll_again:
*cp = '\0';
p_tmp = strrchr(buffer, ' ');
p_tmp = (p_tmp ? p_tmp + 1 : buffer);
- strscpy(tmpbuffer, p_tmp, sizeof(tmpbuffer));
+ strscpy(tmpbuffer, p_tmp);
*cp = tmp;
len = strlen(tmpbuffer);
@@ -452,7 +452,7 @@ poll_again:
char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
{
if (prompt && kdb_prompt_str != prompt)
- strscpy(kdb_prompt_str, prompt, CMD_BUFLEN);
+ strscpy(kdb_prompt_str, prompt);
kdb_printf("%s", kdb_prompt_str);
kdb_nextline = 1; /* Prompt and input resets line number */
return kdb_read(buffer, bufsize);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 5f4be507d79f..7a4d2d4689a5 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -25,7 +25,6 @@
#include <linux/smp.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
-#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -105,7 +104,7 @@ static kdbmsg_t kdbmsgs[] = {
KDBMSG(NOENVVALUE, "Environment variable should have value"),
KDBMSG(NOTIMP, "Command not implemented"),
KDBMSG(ENVFULL, "Environment full"),
- KDBMSG(ENVBUFFULL, "Environment buffer full"),
+ KDBMSG(KMALLOCFAILED, "Failed to allocate memory"),
KDBMSG(TOOMANYBPT, "Too many breakpoints defined"),
#ifdef CONFIG_CPU_XSCALE
KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"),
@@ -130,13 +129,9 @@ static const int __nkdb_err = ARRAY_SIZE(kdbmsgs);
/*
- * Initial environment. This is all kept static and local to
- * this file. We don't want to rely on the memory allocation
- * mechanisms in the kernel, so we use a very limited allocate-only
- * heap for new and altered environment variables. The entire
- * environment is limited to a fixed number of entries (add more
- * to __env[] if required) and a fixed amount of heap (add more to
- * KDB_ENVBUFSIZE if required).
+ * Initial environment. This is all kept static and local to this file.
+ * The entire environment is limited to a fixed number of entries
+ * (add more to __env[] if required)
*/
static char *__env[31] = {
@@ -259,35 +254,6 @@ char *kdbgetenv(const char *match)
}
/*
- * kdballocenv - This function is used to allocate bytes for
- * environment entries.
- * Parameters:
- * bytes The number of bytes to allocate in the static buffer.
- * Returns:
- * A pointer to the allocated space in the buffer on success.
- * NULL if bytes > size available in the envbuffer.
- * Remarks:
- * We use a static environment buffer (envbuffer) to hold the values
- * of dynamically generated environment variables (see kdb_set). Buffer
- * space once allocated is never free'd, so over time, the amount of space
- * (currently 512 bytes) will be exhausted if env variables are changed
- * frequently.
- */
-static char *kdballocenv(size_t bytes)
-{
-#define KDB_ENVBUFSIZE 512
- static char envbuffer[KDB_ENVBUFSIZE];
- static int envbufsize;
- char *ep = NULL;
-
- if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) {
- ep = &envbuffer[envbufsize];
- envbufsize += bytes;
- }
- return ep;
-}
-
-/*
* kdbgetulenv - This function will return the value of an unsigned
* long-valued environment variable.
* Parameters:
@@ -348,9 +314,9 @@ static int kdb_setenv(const char *var, const char *val)
varlen = strlen(var);
vallen = strlen(val);
- ep = kdballocenv(varlen + vallen + 2);
- if (ep == (char *)0)
- return KDB_ENVBUFFULL;
+ ep = kmalloc(varlen + vallen + 2, GFP_KDB);
+ if (!ep)
+ return KDB_KMALLOCFAILED;
sprintf(ep, "%s=%s", var, val);
@@ -359,6 +325,7 @@ static int kdb_setenv(const char *var, const char *val)
&& ((strncmp(__env[i], var, varlen) == 0)
&& ((__env[i][varlen] == '\0')
|| (__env[i][varlen] == '=')))) {
+ kfree_const(__env[i]);
__env[i] = ep;
return 0;
}
@@ -2119,32 +2086,6 @@ static int kdb_dmesg(int argc, const char **argv)
return 0;
}
#endif /* CONFIG_PRINTK */
-
-/* Make sure we balance enable/disable calls, must disable first. */
-static atomic_t kdb_nmi_disabled;
-
-static int kdb_disable_nmi(int argc, const char *argv[])
-{
- if (atomic_read(&kdb_nmi_disabled))
- return 0;
- atomic_set(&kdb_nmi_disabled, 1);
- arch_kgdb_ops.enable_nmi(0);
- return 0;
-}
-
-static int kdb_param_enable_nmi(const char *val, const struct kernel_param *kp)
-{
- if (!atomic_add_unless(&kdb_nmi_disabled, -1, 0))
- return -EINVAL;
- arch_kgdb_ops.enable_nmi(1);
- return 0;
-}
-
-static const struct kernel_param_ops kdb_param_ops_enable_nmi = {
- .set = kdb_param_enable_nmi,
-};
-module_param_cb(enable_nmi, &kdb_param_ops_enable_nmi, NULL, 0600);
-
/*
* kdb_cpu - This function implements the 'cpu' command.
* cpu [<cpunum>]
@@ -2836,20 +2777,10 @@ static kdbtab_t maintab[] = {
},
};
-static kdbtab_t nmicmd = {
- .name = "disable_nmi",
- .func = kdb_disable_nmi,
- .usage = "",
- .help = "Disable NMI entry to KDB",
- .flags = KDB_ENABLE_ALWAYS_SAFE,
-};
-
/* Initialize the kdb command table. */
static void __init kdb_inittab(void)
{
kdb_register_table(maintab, ARRAY_SIZE(maintab));
- if (arch_kgdb_ops.enable_nmi)
- kdb_register_table(&nmicmd, 1);
}
/* Execute any commands defined in kdb_cmds. */
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2746791ce1e2..615b4e6d22c7 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1703,7 +1703,8 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
}
vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
- VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
+ VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO|
+ VM_SEALED_SYSMAP,
&xol_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
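The hunk above is the pattern that ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS (added to init/Kconfig earlier in this diff) asks architectures to follow: the sealing flag is simply ORed into the vm_flags handed to _install_special_mapping(). Purely as an illustration, the sketch below applies the same idea to a hypothetical read-only system mapping; the helper name and flag set are examples, not code from this series.

#include <linux/err.h>
#include <linux/mm.h>

/* Hypothetical example, mirroring the xol_add_vma() change above. */
static int install_sealed_sysmap(struct mm_struct *mm, unsigned long addr,
				 const struct vm_special_mapping *spec)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD | VM_DONTEXPAND |
				       VM_SEALED_SYSMAP,
				       spec);
	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}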
diff --git a/kernel/exit.c b/kernel/exit.c
index c2e6c7b7779f..1b51dc099f1e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -268,6 +268,9 @@ repeat:
leader = p->group_leader;
if (leader != p && thread_group_empty(leader)
&& leader->exit_state == EXIT_ZOMBIE) {
+ /* for pidfs_exit() and do_notify_parent() */
+ if (leader->signal->flags & SIGNAL_GROUP_EXIT)
+ leader->exit_code = leader->signal->group_exit_code;
/*
* If we were the last child thread and the leader has
* exited already, and the leader's parent ignores SIGCHLD,
@@ -756,12 +759,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
kill_orphaned_pgrp(tsk->group_leader, NULL);
tsk->exit_state = EXIT_ZOMBIE;
- /*
- * Ignore thread-group leaders that exited before all
- * subthreads did.
- */
- if (!delay_group_leader(tsk))
- do_notify_pidfd(tsk);
if (unlikely(tsk->ptrace)) {
int sig = thread_group_leader(tsk) &&
@@ -774,6 +771,8 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
do_notify_parent(tsk, tsk->exit_signal);
} else {
autoreap = true;
+ /* untraced sub-thread */
+ do_notify_pidfd(tsk);
}
if (autoreap) {
diff --git a/kernel/panic.c b/kernel/panic.c
index 0c55eec9e874..a3889f38153d 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -833,9 +833,15 @@ device_initcall(register_warn_debugfs);
*/
__visible noinstr void __stack_chk_fail(void)
{
+ unsigned long flags;
+
instrumentation_begin();
+ flags = user_access_save();
+
panic("stack-protector: Kernel stack is corrupted in: %pB",
__builtin_return_address(0));
+
+ user_access_restore(flags);
instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index aa42de4d2768..4d9b21f69eaa 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -68,6 +68,8 @@ config TREE_SRCU
config FORCE_NEED_SRCU_NMI_SAFE
bool "Force selection of NEED_SRCU_NMI_SAFE"
depends on !TINY_SRCU
+ depends on RCU_EXPERT
+ depends on ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select NEED_SRCU_NMI_SAFE
default n
help
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 21575d39c376..66bcd40a28ca 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4171,8 +4171,8 @@ static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
init_dsq(dsq, dsq_id);
- ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
- dsq_hash_params);
+ ret = rhashtable_lookup_insert_fast(&dsq_hash, &dsq->hash_node,
+ dsq_hash_params);
if (ret) {
kfree(dsq);
return ERR_PTR(ret);
@@ -5361,6 +5361,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
*/
cpus_read_lock();
+ scx_idle_enable(ops);
+
if (scx_ops.init) {
ret = SCX_CALL_OP_RET(SCX_KF_UNLOCKED, init);
if (ret) {
@@ -5427,8 +5429,6 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
if (scx_ops.cpu_acquire || scx_ops.cpu_release)
static_branch_enable(&scx_ops_cpu_preempt);
- scx_idle_enable(ops);
-
/*
* Lock out forks, cgroup on/offlining and moves before opening the
* floodgate so that they don't wander into the operations prematurely.
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 52c36a70a3d0..cb343ca889e0 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -544,7 +544,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
* core.
*/
if (flags & SCX_PICK_IDLE_CORE) {
- cpu = prev_cpu;
+ cpu = -EBUSY;
goto out_unlock;
}
}
@@ -584,8 +584,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, u64
* increasing distance.
*/
cpu = scx_pick_idle_cpu(p->cpus_ptr, node, flags);
- if (cpu >= 0)
- goto out_unlock;
out_unlock:
rcu_read_unlock();
@@ -723,14 +721,14 @@ static void reset_idle_masks(struct sched_ext_ops *ops)
void scx_idle_enable(struct sched_ext_ops *ops)
{
if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE))
- static_branch_enable(&scx_builtin_idle_enabled);
+ static_branch_enable_cpuslocked(&scx_builtin_idle_enabled);
else
- static_branch_disable(&scx_builtin_idle_enabled);
+ static_branch_disable_cpuslocked(&scx_builtin_idle_enabled);
if (ops->flags & SCX_OPS_BUILTIN_IDLE_PER_NODE)
- static_branch_enable(&scx_builtin_idle_per_node);
+ static_branch_enable_cpuslocked(&scx_builtin_idle_per_node);
else
- static_branch_disable(&scx_builtin_idle_per_node);
+ static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);
#ifdef CONFIG_SMP
reset_idle_masks(ops);
diff --git a/kernel/signal.c b/kernel/signal.c
index 614d78fe3451..f8859faa26c5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2180,11 +2180,9 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
WARN_ON_ONCE(!tsk->ptrace &&
(tsk->group_leader != tsk || !thread_group_empty(tsk)));
- /*
- * Notify for thread-group leaders without subthreads.
- */
- if (thread_group_empty(tsk))
- do_notify_pidfd(tsk);
+
+ /* ptraced, or group-leader without sub-threads */
+ do_notify_pidfd(tsk);
if (sig != SIGCHLD) {
/*
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 033fba0633cf..a3f35c7d83b6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -265,8 +265,7 @@ config FUNCTION_GRAPH_RETADDR
config FUNCTION_TRACE_ARGS
bool
- depends on HAVE_FUNCTION_ARG_ACCESS_API
- depends on DEBUG_INFO_BTF
+ depends on PROBE_EVENTS_BTF_ARGS
default y
help
If supported with function argument access API and BTF, then
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 92015de6203d..1a48aedb5255 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -6855,6 +6855,7 @@ ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
}
}
}
+ cond_resched();
} while_for_each_ftrace_rec();
return fail ? -EINVAL : 0;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d8d7b28e2c2f..c0f877d39a24 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -6016,7 +6016,7 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
meta->read = cpu_buffer->read;
/* Some archs do not have data cache coherency between kernel and user-space */
- flush_dcache_folio(virt_to_folio(cpu_buffer->meta_page));
+ flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE);
}
static void
@@ -7319,7 +7319,8 @@ consume:
out:
/* Some archs do not have data cache coherency between kernel and user-space */
- flush_dcache_folio(virt_to_folio(cpu_buffer->reader_page->page));
+ flush_kernel_vmap_range(cpu_buffer->reader_page->page,
+ buffer->subbuf_size + BUF_PAGE_HDR_SIZE);
rb_update_meta_page(cpu_buffer);
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index 50344aa9f7f9..968c5c3b0246 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -809,7 +809,8 @@ int rv_register_monitor(struct rv_monitor *monitor, struct rv_monitor *parent)
if (p && rv_is_nested_monitor(p)) {
pr_info("Parent monitor %s is already nested, cannot nest further\n",
parent->name);
- return -EINVAL;
+ retval = -EINVAL;
+ goto out_unlock;
}
r = kzalloc(sizeof(struct rv_monitor_def), GFP_KERNEL);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 103b193875b3..b581e388a9d9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -50,6 +50,7 @@
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/sort.h>
+#include <linux/io.h> /* vmap_page_range() */
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
@@ -3341,10 +3342,9 @@ out_nobuffer:
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
-__printf(3, 0)
-static int
-__trace_array_vprintk(struct trace_buffer *buffer,
- unsigned long ip, const char *fmt, va_list args)
+static __printf(3, 0)
+int __trace_array_vprintk(struct trace_buffer *buffer,
+ unsigned long ip, const char *fmt, va_list args)
{
struct ring_buffer_event *event;
int len = 0, size;
@@ -3394,7 +3394,6 @@ out_nobuffer:
return len;
}
-__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
@@ -3424,7 +3423,6 @@ int trace_array_vprintk(struct trace_array *tr,
* Note, trace_array_init_printk() must be called on @tr before this
* can be used.
*/
-__printf(3, 0)
int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...)
{
@@ -3469,7 +3467,6 @@ int trace_array_init_printk(struct trace_array *tr)
}
EXPORT_SYMBOL_GPL(trace_array_init_printk);
-__printf(3, 4)
int trace_array_printk_buf(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, ...)
{
@@ -3485,7 +3482,6 @@ int trace_array_printk_buf(struct trace_buffer *buffer,
return ret;
}
-__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
return trace_array_vprintk(printk_trace, ip, fmt, args);
@@ -8505,6 +8501,10 @@ static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
struct trace_iterator *iter = &info->iter;
int ret = 0;
+ /* A memmap'ed buffer is not supported for user space mmap */
+ if (iter->tr->flags & TRACE_ARRAY_FL_MEMMAP)
+ return -ENODEV;
+
/* Currently the boot mapped buffer is not supported for mmap */
if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
return -ENODEV;
@@ -9609,13 +9609,11 @@ static void free_trace_buffers(struct trace_array *tr)
return;
free_trace_buffer(&tr->array_buffer);
+ kfree(tr->module_delta);
#ifdef CONFIG_TRACER_MAX_TRACE
free_trace_buffer(&tr->max_buffer);
#endif
-
- if (tr->range_addr_start)
- vunmap((void *)tr->range_addr_start);
}
static void init_trace_flags_index(struct trace_array *tr)
@@ -9808,29 +9806,27 @@ static int instance_mkdir(const char *name)
return ret;
}
-static u64 map_pages(u64 start, u64 size)
+static u64 map_pages(unsigned long start, unsigned long size)
{
- struct page **pages;
- phys_addr_t page_start;
- unsigned int page_count;
- unsigned int i;
- void *vaddr;
-
- page_count = DIV_ROUND_UP(size, PAGE_SIZE);
+ unsigned long vmap_start, vmap_end;
+ struct vm_struct *area;
+ int ret;
- page_start = start;
- pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
return 0;
- for (i = 0; i < page_count; i++) {
- phys_addr_t addr = page_start + i * PAGE_SIZE;
- pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ vmap_start = (unsigned long) area->addr;
+ vmap_end = vmap_start + size;
+
+ ret = vmap_page_range(vmap_start, vmap_end,
+ start, pgprot_nx(PAGE_KERNEL));
+ if (ret < 0) {
+ free_vm_area(area);
+ return 0;
}
- vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- return (u64)(unsigned long)vaddr;
+ return (u64)vmap_start;
}
/**
@@ -10709,6 +10705,7 @@ static inline void do_allocate_snapshot(const char *name) { }
__init static void enable_instances(void)
{
struct trace_array *tr;
+ bool memmap_area = false;
char *curr_str;
char *name;
char *str;
@@ -10777,6 +10774,7 @@ __init static void enable_instances(void)
name);
continue;
}
+ memmap_area = true;
} else if (tok) {
if (!reserve_mem_find_by_name(tok, &start, &size)) {
start = 0;
@@ -10787,7 +10785,20 @@ __init static void enable_instances(void)
}
if (start) {
- addr = map_pages(start, size);
+ /* Start and size must be page aligned */
+ if (start & ~PAGE_MASK) {
+ pr_warn("Tracing: mapping start addr %pa is not page aligned\n", &start);
+ continue;
+ }
+ if (size & ~PAGE_MASK) {
+ pr_warn("Tracing: mapping size %pa is not page aligned\n", &size);
+ continue;
+ }
+
+ if (memmap_area)
+ addr = map_pages(start, size);
+ else
+ addr = (unsigned long)phys_to_virt(start);
if (addr) {
pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
name, &start, (unsigned long)size);
@@ -10814,10 +10825,13 @@ __init static void enable_instances(void)
update_printk_trace(tr);
/*
- * If start is set, then this is a mapped buffer, and
- * cannot be deleted by user space, so keep the reference
- * to it.
+ * memmap'd buffers cannot be freed.
*/
+ if (memmap_area) {
+ tr->flags |= TRACE_ARRAY_FL_MEMMAP;
+ tr->ref++;
+ }
+
if (start) {
tr->flags |= TRACE_ARRAY_FL_BOOT | TRACE_ARRAY_FL_LAST_BOOT;
tr->range_name = no_free_ptr(rname);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c20f6bcc200a..79be1995db44 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -447,6 +447,7 @@ enum {
TRACE_ARRAY_FL_BOOT = BIT(1),
TRACE_ARRAY_FL_LAST_BOOT = BIT(2),
TRACE_ARRAY_FL_MOD_INIT = BIT(3),
+ TRACE_ARRAY_FL_MEMMAP = BIT(4),
};
#ifdef CONFIG_MODULES
@@ -852,13 +853,15 @@ static inline void __init disable_tracing_selftest(const char *reason)
extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
-extern int
-trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
-extern int
-trace_vprintk(unsigned long ip, const char *fmt, va_list args);
-extern int
-trace_array_vprintk(struct trace_array *tr,
- unsigned long ip, const char *fmt, va_list args);
+
+__printf(2, 0)
+int trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
+__printf(2, 0)
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
+__printf(3, 0)
+int trace_array_vprintk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, va_list args);
+__printf(3, 4)
int trace_array_printk_buf(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 8638b7f7ff85..069e92856bda 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -470,6 +470,7 @@ static void test_event_printk(struct trace_event_call *call)
case '%':
continue;
case 'p':
+ do_pointer:
/* Find dereferencing fields */
switch (fmt[i + 1]) {
case 'B': case 'R': case 'r':
@@ -498,6 +499,12 @@ static void test_event_printk(struct trace_event_call *call)
continue;
if (fmt[i + j] == '*') {
star = true;
+ /* Handle %*pbl case */
+ if (!j && fmt[i + 1] == 'p') {
+ arg++;
+ i++;
+ goto do_pointer;
+ }
continue;
}
if ((fmt[i + j] == 's')) {
diff --git a/lib/sg_split.c b/lib/sg_split.c
index 60a0babebf2e..0f89aab5c671 100644
--- a/lib/sg_split.c
+++ b/lib/sg_split.c
@@ -88,8 +88,6 @@ static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
if (!j) {
out_sg->offset += split->skip_sg0;
out_sg->length -= split->skip_sg0;
- } else {
- out_sg->offset = 0;
}
sg_dma_address(out_sg) = 0;
sg_dma_len(out_sg) = 0;
diff --git a/lib/sort.c b/lib/sort.c
index 8e73dc55476b..52363995ccc5 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -186,36 +186,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size)
return i / 2;
}
-/**
- * sort_r - sort an array of elements
- * @base: pointer to data to sort
- * @num: number of elements
- * @size: size of each element
- * @cmp_func: pointer to comparison function
- * @swap_func: pointer to swap function or NULL
- * @priv: third argument passed to comparison function
- *
- * This function does a heapsort on the given array. You may provide
- * a swap_func function if you need to do something more than a memory
- * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
- * avoids a slow retpoline and so is significantly faster.
- *
- * The comparison function must adhere to specific mathematical
- * properties to ensure correct and stable sorting:
- * - Antisymmetry: cmp_func(a, b) must return the opposite sign of
- * cmp_func(b, a).
- * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then
- * cmp_func(a, c) <= 0.
- *
- * Sorting time is O(n log n) both on average and worst-case. While
- * quicksort is slightly faster on average, it suffers from exploitable
- * O(n*n) worst-case behavior and extra memory requirements that make
- * it less suitable for kernel use.
- */
-void sort_r(void *base, size_t num, size_t size,
- cmp_r_func_t cmp_func,
- swap_r_func_t swap_func,
- const void *priv)
+#include <linux/sched.h>
+
+static void __sort_r(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv,
+ bool may_schedule)
{
/* pre-scale counters for performance */
size_t n = num * size, a = (num/2) * size;
@@ -286,6 +263,9 @@ void sort_r(void *base, size_t num, size_t size,
b = parent(b, lsbit, size);
do_swap(base + b, base + c, size, swap_func, priv);
}
+
+ if (may_schedule)
+ cond_resched();
}
n -= size;
@@ -293,8 +273,63 @@ void sort_r(void *base, size_t num, size_t size,
if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0)
do_swap(base, base + size, size, swap_func, priv);
}
+
+/**
+ * sort_r - sort an array of elements
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
+ * @priv: third argument passed to comparison function
+ *
+ * This function does a heapsort on the given array. You may provide
+ * a swap_func function if you need to do something more than a memory
+ * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
+ * avoids a slow retpoline and so is significantly faster.
+ *
+ * The comparison function must adhere to specific mathematical
+ * properties to ensure correct and stable sorting:
+ * - Antisymmetry: cmp_func(a, b) must return the opposite sign of
+ * cmp_func(b, a).
+ * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then
+ * cmp_func(a, c) <= 0.
+ *
+ * Sorting time is O(n log n) both on average and worst-case. While
+ * quicksort is slightly faster on average, it suffers from exploitable
+ * O(n*n) worst-case behavior and extra memory requirements that make
+ * it less suitable for kernel use.
+ */
+void sort_r(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
+{
+ __sort_r(base, num, size, cmp_func, swap_func, priv, false);
+}
EXPORT_SYMBOL(sort_r);
+/**
+ * sort_r_nonatomic - sort an array of elements, with cond_resched
+ * @base: pointer to data to sort
+ * @num: number of elements
+ * @size: size of each element
+ * @cmp_func: pointer to comparison function
+ * @swap_func: pointer to swap function or NULL
+ * @priv: third argument passed to comparison function
+ *
+ * Same as sort_r, but preferred for larger arrays as it does a periodic
+ * cond_resched().
+ */
+void sort_r_nonatomic(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
+{
+ __sort_r(base, num, size, cmp_func, swap_func, priv, true);
+}
+EXPORT_SYMBOL(sort_r_nonatomic);
+
void sort(void *base, size_t num, size_t size,
cmp_func_t cmp_func,
swap_func_t swap_func)
@@ -304,6 +339,19 @@ void sort(void *base, size_t num, size_t size,
.swap = swap_func,
};
- return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
+ return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, false);
}
EXPORT_SYMBOL(sort);
+
+void sort_nonatomic(void *base, size_t num, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func)
+{
+ struct wrapper w = {
+ .cmp = cmp_func,
+ .swap = swap_func,
+ };
+
+ return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, true);
+}
+EXPORT_SYMBOL(sort_nonatomic);
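For callers, sort_nonatomic() and sort_r_nonatomic() are drop-in replacements for sort()/sort_r() when sorting large arrays from process context, since the periodic cond_resched() means they may sleep. A small illustrative sketch (everything except sort_nonatomic() is invented for the example):

#include <linux/sort.h>
#include <linux/types.h>

static int example_cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a;
	u32 y = *(const u32 *)b;

	return x < y ? -1 : (x > y ? 1 : 0);
}

static void example_sort_table(u32 *table, size_t nr)
{
	/* May sleep due to cond_resched(); process context only. */
	sort_nonatomic(table, nr, sizeof(*table), example_cmp_u32, NULL);
}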
diff --git a/lib/vdso/datastore.c b/lib/vdso/datastore.c
index c715e217ec65..3693c6caf2c4 100644
--- a/lib/vdso/datastore.c
+++ b/lib/vdso/datastore.c
@@ -99,7 +99,8 @@ const struct vm_special_mapping vdso_vvar_mapping = {
struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr)
{
return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE,
- VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | VM_PFNMAP,
+ VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP |
+ VM_PFNMAP | VM_SEALED_SYSMAP,
&vdso_vvar_mapping);
}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index abf40eb36c49..01699852f30c 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1699,8 +1699,12 @@ char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
return buf;
}
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
static char *va_format(char *buf, char *end, struct va_format *va_fmt,
- struct printf_spec spec, const char *fmt)
+ struct printf_spec spec)
{
va_list va;
@@ -1713,6 +1717,7 @@ static char *va_format(char *buf, char *end, struct va_format *va_fmt,
return buf;
}
+#pragma GCC diagnostic pop
static noinline_for_stack
char *uuid_string(char *buf, char *end, const u8 *addr,
@@ -2466,7 +2471,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'U':
return uuid_string(buf, end, ptr, spec, fmt);
case 'V':
- return va_format(buf, end, ptr, spec, fmt);
+ return va_format(buf, end, ptr, spec);
case 'K':
return restricted_pointer(buf, end, ptr, spec);
case 'N':
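The va_format() change above only drops the helper's unused fmt parameter; users of the %pV specifier are unaffected. For reference, the usual %pV pattern looks roughly like this (generic function name, illustrative only):

#include <linux/printk.h>
#include <linux/stdarg.h>

static void example_log(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV expands vaf.fmt against vaf.va inside vsnprintf(). */
	pr_info("example: %pV\n", &vaf);
	va_end(args);
}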
diff --git a/mm/damon/core.c b/mm/damon/core.c
index fc1eba3da419..f0c1676f0599 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -76,14 +76,13 @@ int damon_register_ops(struct damon_operations *ops)
if (ops->id >= NR_DAMON_OPS)
return -EINVAL;
+
mutex_lock(&damon_ops_lock);
/* Fail for already registered ops */
- if (__damon_is_registered_ops(ops->id)) {
+ if (__damon_is_registered_ops(ops->id))
err = -EINVAL;
- goto out;
- }
- damon_registered_ops[ops->id] = *ops;
-out:
+ else
+ damon_registered_ops[ops->id] = *ops;
mutex_unlock(&damon_ops_lock);
return err;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6fccfe6d046c..39f92aad7bd1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5179,7 +5179,7 @@ static const struct ctl_table hugetlb_table[] = {
},
};
-static void hugetlb_sysctl_init(void)
+static void __init hugetlb_sysctl_init(void)
{
register_sysctl_init("vm", hugetlb_table);
}
diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
index 59d673400085..3ea317837c2d 100644
--- a/mm/kasan/kasan_test_c.c
+++ b/mm/kasan/kasan_test_c.c
@@ -1073,14 +1073,11 @@ static void kmem_cache_rcu_uaf(struct kunit *test)
kmem_cache_destroy(cache);
}
-static void empty_cache_ctor(void *object) { }
-
static void kmem_cache_double_destroy(struct kunit *test)
{
struct kmem_cache *cache;
- /* Provide a constructor to prevent cache merging. */
- cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
+ cache = kmem_cache_create("test_cache", 200, 0, SLAB_NO_MERGE, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
kmem_cache_destroy(cache);
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
diff --git a/mm/memblock.c b/mm/memblock.c
index 284154445409..0a53db4d9f7b 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2167,6 +2167,9 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
unsigned long start_pfn = PFN_UP(start);
unsigned long end_pfn = PFN_DOWN(end);
+ if (!IS_ENABLED(CONFIG_HIGHMEM) && end_pfn > max_low_pfn)
+ end_pfn = max_low_pfn;
+
if (start_pfn >= end_pfn)
return 0;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 75401866fb76..8305483de38b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1813,21 +1813,15 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
page = pfn_to_page(pfn);
folio = page_folio(page);
- /*
- * No reference or lock is held on the folio, so it might
- * be modified concurrently (e.g. split). As such,
- * folio_nr_pages() may read garbage. This is fine as the outer
- * loop will revisit the split folio later.
- */
- if (folio_test_large(folio))
- pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
-
if (!folio_try_get(folio))
continue;
if (unlikely(page_folio(page) != folio))
goto put_folio;
+ if (folio_test_large(folio))
+ pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1;
+
if (folio_contain_hwpoisoned_page(folio)) {
if (WARN_ON(folio_test_lru(folio)))
folio_isolate_lru(folio);
diff --git a/mm/mm_init.c b/mm/mm_init.c
index a38a1909b407..84f14fa12d0d 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -984,19 +984,19 @@ static void __init memmap_init(void)
}
}
-#ifdef CONFIG_SPARSEMEM
/*
* Initialize the memory map for hole in the range [memory_end,
- * section_end].
+ * section_end] for SPARSEMEM and in the range [memory_end, memmap_end]
+ * for FLATMEM.
* Append the pages in this hole to the highest zone in the last
* node.
- * The call to init_unavailable_range() is outside the ifdef to
- * silence the compiler warining about zone_id set but not used;
- * for FLATMEM it is a nop anyway
*/
+#ifdef CONFIG_SPARSEMEM
end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
- if (hole_pfn < end_pfn)
+#else
+ end_pfn = round_up(end_pfn, MAX_ORDER_NR_PAGES);
#endif
+ if (hole_pfn < end_pfn)
init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}
diff --git a/mm/mremap.c b/mm/mremap.c
index 0865387531ed..7db9da609c84 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1561,11 +1561,12 @@ static unsigned long expand_vma_in_place(struct vma_remap_struct *vrm)
* adjacent to the expanded vma and otherwise
* compatible.
*/
- vma = vrm->vma = vma_merge_extend(&vmi, vma, vrm->delta);
+ vma = vma_merge_extend(&vmi, vma, vrm->delta);
if (!vma) {
vrm_uncharge(vrm);
return -ENOMEM;
}
+ vrm->vma = vma;
vrm_stat_account(vrm, vrm->delta);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f51aa6051a99..fd6b865cb1ab 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1593,7 +1593,7 @@ static __always_inline void page_del_and_expand(struct zone *zone,
static void check_new_page_bad(struct page *page)
{
- if (unlikely(page->flags & __PG_HWPOISON)) {
+ if (unlikely(PageHWPoison(page))) {
/* Don't complain about hwpoisoned pages */
if (PageBuddy(page))
__ClearPageBuddy(page);
@@ -4604,8 +4604,8 @@ retry:
goto retry;
/* Reclaim/compaction failed to prevent the fallback */
- if (defrag_mode) {
- alloc_flags &= ALLOC_NOFRAGMENT;
+ if (defrag_mode && (alloc_flags & ALLOC_NOFRAGMENT)) {
+ alloc_flags &= ~ALLOC_NOFRAGMENT;
goto retry;
}
@@ -7385,6 +7385,9 @@ struct page *try_alloc_pages_noprof(int nid, unsigned int order)
/* Unlike regular alloc_pages() there is no __alloc_pages_slowpath(). */
+ if (page)
+ set_page_refcounted(page);
+
if (memcg_kmem_online() && page &&
unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) {
free_pages_nolock(page, order);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index a051a29e95ad..b2fc5266e3d2 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -83,7 +83,14 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
unsigned int skip_pages;
if (PageHuge(page)) {
- if (!hugepage_migration_supported(folio_hstate(folio)))
+ struct hstate *h;
+
+ /*
+ * The huge page may be freed, so folio_hstate()
+ * cannot be used directly.
+ */
+ h = size_to_hstate(folio_size(folio));
+ if (h && !hugepage_migration_supported(h))
return page;
} else if (!folio_test_lru(folio) && !__folio_test_movable(folio)) {
return page;
diff --git a/mm/zswap.c b/mm/zswap.c
index 0dcc54eab58b..204fb59da33c 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -883,18 +883,32 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+ struct acomp_req *req;
+ struct crypto_acomp *acomp;
+ u8 *buffer;
+
+ if (IS_ERR_OR_NULL(acomp_ctx))
+ return 0;
mutex_lock(&acomp_ctx->mutex);
- if (!IS_ERR_OR_NULL(acomp_ctx)) {
- if (!IS_ERR_OR_NULL(acomp_ctx->req))
- acomp_request_free(acomp_ctx->req);
- acomp_ctx->req = NULL;
- if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
- crypto_free_acomp(acomp_ctx->acomp);
- kfree(acomp_ctx->buffer);
- }
+ req = acomp_ctx->req;
+ acomp = acomp_ctx->acomp;
+ buffer = acomp_ctx->buffer;
+ acomp_ctx->req = NULL;
+ acomp_ctx->acomp = NULL;
+ acomp_ctx->buffer = NULL;
mutex_unlock(&acomp_ctx->mutex);
+ /*
+ * Do the actual freeing after releasing the mutex to avoid subtle
+ * locking dependencies causing deadlocks.
+ */
+ if (!IS_ERR_OR_NULL(req))
+ acomp_request_free(req);
+ if (!IS_ERR_OR_NULL(acomp))
+ crypto_free_acomp(acomp);
+ kfree(buffer);
+
return 0;
}
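The zswap_cpu_comp_dead() rework above is an instance of a common pattern: detach the resources while holding the lock, then free them after dropping it so the free paths cannot create lock dependencies. A generic sketch of the pattern (structure and names invented for illustration):

#include <linux/mutex.h>
#include <linux/slab.h>

struct example_ctx {
	struct mutex lock;
	void *buffer;
};

static void example_teardown(struct example_ctx *ctx)
{
	void *buffer;

	/* Detach under the lock... */
	mutex_lock(&ctx->lock);
	buffer = ctx->buffer;
	ctx->buffer = NULL;
	mutex_unlock(&ctx->lock);

	/* ...free outside of it. */
	kfree(buffer);
}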
diff --git a/net/9p/client.c b/net/9p/client.c
index 09f8ced9f8bb..61461b9fa134 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1548,7 +1548,8 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int count = iov_iter_count(to);
- int rsize, received, non_zc = 0;
+ u32 rsize, received;
+ bool non_zc = false;
char *dataptr;
*err = 0;
@@ -1571,7 +1572,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
0, 11, "dqd", fid->fid,
offset, rsize);
} else {
- non_zc = 1;
+ non_zc = true;
req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
rsize);
}
@@ -1592,11 +1593,13 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
return 0;
}
if (rsize < received) {
- pr_err("bogus RREAD count (%d > %d)\n", received, rsize);
- received = rsize;
+ pr_err("bogus RREAD count (%u > %u)\n", received, rsize);
+ *err = -EIO;
+ p9_req_put(clnt, req);
+ return 0;
}
- p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received);
+ p9_debug(P9_DEBUG_9P, "<<< RREAD count %u\n", received);
if (non_zc) {
int n = copy_to_iter(dataptr, received, to);
@@ -1623,9 +1626,9 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
*err = 0;
while (iov_iter_count(from)) {
- int count = iov_iter_count(from);
- int rsize = fid->iounit;
- int written;
+ size_t count = iov_iter_count(from);
+ u32 rsize = fid->iounit;
+ u32 written;
if (!rsize || rsize > clnt->msize - P9_IOHDRSZ)
rsize = clnt->msize - P9_IOHDRSZ;
@@ -1633,7 +1636,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
if (count < rsize)
rsize = count;
- p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d (/%d)\n",
+ p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %u (/%zu)\n",
fid->fid, offset, rsize, count);
/* Don't bother zerocopy for small IO (< 1024) */
@@ -1659,11 +1662,14 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
break;
}
if (rsize < written) {
- pr_err("bogus RWRITE count (%d > %d)\n", written, rsize);
- written = rsize;
+ pr_err("bogus RWRITE count (%u > %u)\n", written, rsize);
+ *err = -EIO;
+ iov_iter_revert(from, count - iov_iter_count(from));
+ p9_req_put(clnt, req);
+ break;
}
- p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written);
+ p9_debug(P9_DEBUG_9P, "<<< RWRITE count %u\n", written);
p9_req_put(clnt, req);
iov_iter_revert(from, count - written - iov_iter_count(from));
@@ -1712,7 +1718,7 @@ p9_client_write_subreq(struct netfs_io_subrequest *subreq)
if (written > len) {
pr_err("bogus RWRITE count (%d > %u)\n", written, len);
- written = len;
+ written = -EIO;
}
p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", len);
@@ -2098,7 +2104,8 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
{
- int err, rsize, non_zc = 0;
+ int err, non_zc = 0;
+ u32 rsize;
struct p9_client *clnt;
struct p9_req_t *req;
char *dataptr;
@@ -2107,7 +2114,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
iov_iter_kvec(&to, ITER_DEST, &kv, 1, count);
- p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
+ p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %u\n",
fid->fid, offset, count);
clnt = fid->clnt;
@@ -2142,11 +2149,12 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
goto free_and_error;
}
if (rsize < count) {
- pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
- count = rsize;
+ pr_err("bogus RREADDIR count (%u > %u)\n", count, rsize);
+ err = -EIO;
+ goto free_and_error;
}
- p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
+ p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %u\n", count);
if (non_zc)
memmove(data, dataptr, count);
diff --git a/net/9p/error.c b/net/9p/error.c
index 8da744494b68..8ba8afc91482 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/errno.h>
+#include <linux/hashtable.h>
#include <net/9p/9p.h>
/**
@@ -33,8 +34,8 @@ struct errormap {
struct hlist_node list;
};
-#define ERRHASHSZ 32
-static struct hlist_head hash_errmap[ERRHASHSZ];
+#define ERRHASH_BITS 5
+static DEFINE_HASHTABLE(hash_errmap, ERRHASH_BITS);
/* FixMe - reduce to a reasonable size */
static struct errormap errmap[] = {
@@ -176,18 +177,14 @@ static struct errormap errmap[] = {
int p9_error_init(void)
{
struct errormap *c;
- int bucket;
-
- /* initialize hash table */
- for (bucket = 0; bucket < ERRHASHSZ; bucket++)
- INIT_HLIST_HEAD(&hash_errmap[bucket]);
+ u32 hash;
/* load initial error map into hash table */
for (c = errmap; c->name; c++) {
c->namelen = strlen(c->name);
- bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
+ hash = jhash(c->name, c->namelen, 0);
INIT_HLIST_NODE(&c->list);
- hlist_add_head(&c->list, &hash_errmap[bucket]);
+ hash_add(hash_errmap, &c->list, hash);
}
return 1;
@@ -205,12 +202,12 @@ int p9_errstr2errno(char *errstr, int len)
{
int errno;
struct errormap *c;
- int bucket;
+ u32 hash;
errno = 0;
c = NULL;
- bucket = jhash(errstr, len, 0) % ERRHASHSZ;
- hlist_for_each_entry(c, &hash_errmap[bucket], list) {
+ hash = jhash(errstr, len, 0);
+ hash_for_each_possible(hash_errmap, c, list, hash) {
if (c->namelen == len && !memcmp(c->name, errstr, len)) {
errno = c->val;
break;
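The conversion above replaces the open-coded bucket array in net/9p/error.c with the generic <linux/hashtable.h> helpers, which derive the bucket from the full 32-bit hash and need no explicit initialization loop. A stand-alone sketch of that API with an invented entry type:

#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_entry {
	const char *name;
	int value;
	struct hlist_node node;
};

#define EXAMPLE_HASH_BITS 5	/* 2^5 = 32 buckets */
static DEFINE_HASHTABLE(example_table, EXAMPLE_HASH_BITS);

static void example_add(struct example_entry *e)
{
	u32 hash = jhash(e->name, strlen(e->name), 0);

	/* hash_add() picks the bucket from the full hash value. */
	hash_add(example_table, &e->node, hash);
}

static struct example_entry *example_find(const char *name)
{
	u32 hash = jhash(name, strlen(name), 0);
	struct example_entry *e;

	hash_for_each_possible(example_table, e, node, hash) {
		if (!strcmp(e->name, name))
			return e;
	}
	return NULL;
}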
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 196060dc6138..339ec4e54778 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
+#include <linux/in6.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
@@ -191,12 +192,13 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
spin_lock(&m->req_lock);
- if (m->err) {
+ if (READ_ONCE(m->err)) {
spin_unlock(&m->req_lock);
return;
}
- m->err = err;
+ WRITE_ONCE(m->err, err);
+ ASSERT_EXCLUSIVE_WRITER(m->err);
list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
list_move(&req->req_list, &cancel_list);
@@ -283,7 +285,7 @@ static void p9_read_work(struct work_struct *work)
m = container_of(work, struct p9_conn, rq);
- if (m->err < 0)
+ if (READ_ONCE(m->err) < 0)
return;
p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
@@ -450,7 +452,7 @@ static void p9_write_work(struct work_struct *work)
m = container_of(work, struct p9_conn, wq);
- if (m->err < 0) {
+ if (READ_ONCE(m->err) < 0) {
clear_bit(Wworksched, &m->wsched);
return;
}
@@ -622,7 +624,7 @@ static void p9_poll_mux(struct p9_conn *m)
__poll_t n;
int err = -ECONNRESET;
- if (m->err < 0)
+ if (READ_ONCE(m->err) < 0)
return;
n = p9_fd_poll(m->client, NULL, &err);
@@ -665,6 +667,7 @@ static void p9_poll_mux(struct p9_conn *m)
static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
__poll_t n;
+ int err;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
@@ -673,9 +676,10 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
spin_lock(&m->req_lock);
- if (m->err < 0) {
+ err = READ_ONCE(m->err);
+ if (err < 0) {
spin_unlock(&m->req_lock);
- return m->err;
+ return err;
}
WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
@@ -954,64 +958,55 @@ static void p9_fd_close(struct p9_client *client)
kfree(ts);
}
-/*
- * stolen from NFS - maybe should be made a generic function?
- */
-static inline int valid_ipaddr4(const char *buf)
-{
- int rc, count, in[4];
-
- rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
- if (rc != 4)
- return -EINVAL;
- for (count = 0; count < 4; count++) {
- if (in[count] > 255)
- return -EINVAL;
- }
- return 0;
-}
-
static int p9_bind_privport(struct socket *sock)
{
- struct sockaddr_in cl;
+ struct sockaddr_storage stor = { 0 };
int port, err = -EINVAL;
- memset(&cl, 0, sizeof(cl));
- cl.sin_family = AF_INET;
- cl.sin_addr.s_addr = htonl(INADDR_ANY);
+ stor.ss_family = sock->ops->family;
+ if (stor.ss_family == AF_INET)
+ ((struct sockaddr_in *)&stor)->sin_addr.s_addr = htonl(INADDR_ANY);
+ else
+ ((struct sockaddr_in6 *)&stor)->sin6_addr = in6addr_any;
for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
- cl.sin_port = htons((ushort)port);
- err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
+ if (stor.ss_family == AF_INET)
+ ((struct sockaddr_in *)&stor)->sin_port = htons((ushort)port);
+ else
+ ((struct sockaddr_in6 *)&stor)->sin6_port = htons((ushort)port);
+ err = kernel_bind(sock, (struct sockaddr *)&stor, sizeof(stor));
if (err != -EADDRINUSE)
break;
}
return err;
}
-
static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
int err;
+ char port_str[6];
struct socket *csocket;
- struct sockaddr_in sin_server;
+ struct sockaddr_storage stor = { 0 };
struct p9_fd_opts opts;
err = parse_opts(args, &opts);
if (err < 0)
return err;
- if (addr == NULL || valid_ipaddr4(addr) < 0)
+ if (!addr)
return -EINVAL;
+ sprintf(port_str, "%u", opts.port);
+ err = inet_pton_with_scope(current->nsproxy->net_ns, AF_UNSPEC, addr,
+ port_str, &stor);
+ if (err < 0)
+ return err;
+
csocket = NULL;
client->trans_opts.tcp.port = opts.port;
client->trans_opts.tcp.privport = opts.privport;
- sin_server.sin_family = AF_INET;
- sin_server.sin_addr.s_addr = in_aton(addr);
- sin_server.sin_port = htons(opts.port);
- err = __sock_create(current->nsproxy->net_ns, PF_INET,
+ err = __sock_create(current->nsproxy->net_ns, stor.ss_family,
SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
if (err) {
pr_err("%s (%d): problem creating socket\n",
@@ -1030,8 +1025,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
}
err = READ_ONCE(csocket->ops)->connect(csocket,
- (struct sockaddr *)&sin_server,
- sizeof(struct sockaddr_in), 0);
+ (struct sockaddr *)&stor,
+ sizeof(stor), 0);
if (err < 0) {
pr_err("%s (%d): problem connecting socket to %s\n",
__func__, task_pid_nr(current), addr);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 2fe88ea79a70..6f75862d9782 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -270,9 +270,6 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
old = rcu_dereference_protected(clnt->cl_xprt,
lockdep_is_held(&clnt->cl_lock));
- if (!xprt_bound(xprt))
- clnt->cl_autobind = 1;
-
clnt->cl_timeout = timeout;
rcu_assign_pointer(clnt->cl_xprt, xprt);
spin_unlock(&clnt->cl_lock);
@@ -512,6 +509,8 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
clnt->cl_discrtry = 1;
if (!(args->flags & RPC_CLNT_CREATE_QUIET))
clnt->cl_chatty = 1;
+ if (args->flags & RPC_CLNT_CREATE_NETUNREACH_FATAL)
+ clnt->cl_netunreach_fatal = 1;
return clnt;
}
@@ -662,6 +661,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
new->cl_noretranstimeo = clnt->cl_noretranstimeo;
new->cl_discrtry = clnt->cl_discrtry;
new->cl_chatty = clnt->cl_chatty;
+ new->cl_netunreach_fatal = clnt->cl_netunreach_fatal;
new->cl_principal = clnt->cl_principal;
new->cl_max_connect = clnt->cl_max_connect;
return new;
@@ -1195,6 +1195,8 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
task->tk_flags |= RPC_TASK_TIMEOUT;
if (clnt->cl_noretranstimeo)
task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
+ if (clnt->cl_netunreach_fatal)
+ task->tk_flags |= RPC_TASK_NETUNREACH_FATAL;
atomic_inc(&clnt->cl_task_count);
}
@@ -2102,14 +2104,17 @@ call_bind_status(struct rpc_task *task)
case -EPROTONOSUPPORT:
trace_rpcb_bind_version_err(task);
goto retry_timeout;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
+ break;
+ fallthrough;
case -ECONNREFUSED: /* connection problems */
case -ECONNRESET:
case -ECONNABORTED:
case -ENOTCONN:
case -EHOSTDOWN:
- case -ENETDOWN:
case -EHOSTUNREACH:
- case -ENETUNREACH:
case -EPIPE:
trace_rpcb_unreachable_err(task);
if (!RPC_IS_SOFTCONN(task)) {
@@ -2191,19 +2196,22 @@ call_connect_status(struct rpc_task *task)
task->tk_status = 0;
switch (status) {
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
+ break;
+ fallthrough;
case -ECONNREFUSED:
case -ECONNRESET:
/* A positive refusal suggests a rebind is needed. */
- if (RPC_IS_SOFTCONN(task))
- break;
if (clnt->cl_autobind) {
rpc_force_rebind(clnt);
+ if (RPC_IS_SOFTCONN(task))
+ break;
goto out_retry;
}
fallthrough;
case -ECONNABORTED:
- case -ENETDOWN:
- case -ENETUNREACH:
case -EHOSTUNREACH:
case -EPIPE:
case -EPROTO:
@@ -2455,10 +2463,13 @@ call_status(struct rpc_task *task)
trace_rpc_call_status(task);
task->tk_status = 0;
switch(status) {
- case -EHOSTDOWN:
case -ENETDOWN:
- case -EHOSTUNREACH:
case -ENETUNREACH:
+ if (task->tk_flags & RPC_TASK_NETUNREACH_FATAL)
+ goto out_exit;
+ fallthrough;
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
case -EPERM:
if (RPC_IS_SOFTCONN(task))
goto out_exit;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 102c3818bc54..53bcca365fb1 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -820,9 +820,10 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
}
trace_rpcb_setport(child, map->r_status, map->r_port);
- xprt->ops->set_port(xprt, map->r_port);
- if (map->r_port)
+ if (map->r_port) {
+ xprt->ops->set_port(xprt, map->r_port);
xprt_set_bound(xprt);
+ }
}
/*
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 9b45fbdc90ca..73bc39281ef5 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -276,6 +276,8 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
+ if (unlikely(current->flags & PF_EXITING))
+ return -EINTR;
schedule();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index 5c8ecdaaa985..09434e1143c5 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -59,6 +59,16 @@ static struct kobject *rpc_sysfs_object_alloc(const char *name,
return NULL;
}
+static inline struct rpc_clnt *
+rpc_sysfs_client_kobj_get_clnt(struct kobject *kobj)
+{
+ struct rpc_sysfs_client *c = container_of(kobj,
+ struct rpc_sysfs_client, kobject);
+ struct rpc_clnt *ret = c->clnt;
+
+ return refcount_inc_not_zero(&ret->cl_count) ? ret : NULL;
+}
+
static inline struct rpc_xprt *
rpc_sysfs_xprt_kobj_get_xprt(struct kobject *kobj)
{
@@ -86,6 +96,51 @@ rpc_sysfs_xprt_switch_kobj_get_xprt(struct kobject *kobj)
return xprt_switch_get(x->xprt_switch);
}
+static ssize_t rpc_sysfs_clnt_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj);
+ ssize_t ret;
+
+ if (!clnt)
+ return sprintf(buf, "<closed>\n");
+
+ ret = sprintf(buf, "%u", clnt->cl_vers);
+ refcount_dec(&clnt->cl_count);
+ return ret;
+}
+
+static ssize_t rpc_sysfs_clnt_program_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj);
+ ssize_t ret;
+
+ if (!clnt)
+ return sprintf(buf, "<closed>\n");
+
+ ret = sprintf(buf, "%s", clnt->cl_program->name);
+ refcount_dec(&clnt->cl_count);
+ return ret;
+}
+
+static ssize_t rpc_sysfs_clnt_max_connect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_clnt *clnt = rpc_sysfs_client_kobj_get_clnt(kobj);
+ ssize_t ret;
+
+ if (!clnt)
+ return sprintf(buf, "<closed>\n");
+
+ ret = sprintf(buf, "%u\n", clnt->cl_max_connect);
+ refcount_dec(&clnt->cl_count);
+ return ret;
+}
+
static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -129,6 +184,31 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj,
return ret;
}
+static const char *xprtsec_strings[] = {
+ [RPC_XPRTSEC_NONE] = "none",
+ [RPC_XPRTSEC_TLS_ANON] = "tls-anon",
+ [RPC_XPRTSEC_TLS_X509] = "tls-x509",
+};
+
+static ssize_t rpc_sysfs_xprt_xprtsec_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ ssize_t ret;
+
+ if (!xprt) {
+ ret = sprintf(buf, "<closed>\n");
+ goto out;
+ }
+
+ ret = sprintf(buf, "%s\n", xprtsec_strings[xprt->xprtsec.policy]);
+ xprt_put(xprt);
+out:
+ return ret;
+
+}
+
static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -206,6 +286,14 @@ static ssize_t rpc_sysfs_xprt_state_show(struct kobject *kobj,
return ret;
}
+static ssize_t rpc_sysfs_xprt_del_xprt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "# delete this xprt\n");
+}
+
+
static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
@@ -225,6 +313,55 @@ static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj,
return ret;
}
+static ssize_t rpc_sysfs_xprt_switch_add_xprt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "# add one xprt to this xprt_switch\n");
+}
+
+static ssize_t rpc_sysfs_xprt_switch_add_xprt_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rpc_xprt_switch *xprt_switch =
+ rpc_sysfs_xprt_switch_kobj_get_xprt(kobj);
+ struct xprt_create xprt_create_args;
+ struct rpc_xprt *xprt, *new;
+
+ if (!xprt_switch)
+ return 0;
+
+ xprt = rpc_xprt_switch_get_main_xprt(xprt_switch);
+ if (!xprt)
+ goto out;
+
+ xprt_create_args.ident = xprt->xprt_class->ident;
+ xprt_create_args.net = xprt->xprt_net;
+ xprt_create_args.dstaddr = (struct sockaddr *)&xprt->addr;
+ xprt_create_args.addrlen = xprt->addrlen;
+ xprt_create_args.servername = xprt->servername;
+ xprt_create_args.bc_xprt = xprt->bc_xprt;
+ xprt_create_args.xprtsec = xprt->xprtsec;
+ xprt_create_args.connect_timeout = xprt->connect_timeout;
+ xprt_create_args.reconnect_timeout = xprt->max_reconnect_timeout;
+
+ new = xprt_create_transport(&xprt_create_args);
+ if (IS_ERR_OR_NULL(new)) {
+ count = PTR_ERR(new);
+ goto out_put_xprt;
+ }
+
+ rpc_xprt_switch_add_xprt(xprt_switch, new);
+ xprt_put(new);
+
+out_put_xprt:
+ xprt_put(xprt);
+out:
+ xprt_switch_put(xprt_switch);
+ return count;
+}
+
static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
@@ -335,6 +472,40 @@ out_put:
return count;
}
+static ssize_t rpc_sysfs_xprt_del_xprt(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+ struct rpc_xprt_switch *xps = rpc_sysfs_xprt_kobj_get_xprt_switch(kobj);
+
+ if (!xprt || !xps) {
+ count = 0;
+ goto out;
+ }
+
+ if (xprt->main) {
+ count = -EINVAL;
+ goto release_tasks;
+ }
+
+ if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) {
+ count = -EINTR;
+ goto out_put;
+ }
+
+ xprt_set_offline_locked(xprt, xps);
+ xprt_delete_locked(xprt, xps);
+
+release_tasks:
+ xprt_release_write(xprt, NULL);
+out_put:
+ xprt_put(xprt);
+ xprt_switch_put(xps);
+out:
+ return count;
+}
+
int rpc_sysfs_init(void)
{
rpc_sunrpc_kset = kset_create_and_add("sunrpc", NULL, kernel_kobj);
@@ -398,23 +569,48 @@ static const void *rpc_sysfs_xprt_namespace(const struct kobject *kobj)
kobject)->xprt->xprt_net;
}
+static struct kobj_attribute rpc_sysfs_clnt_version = __ATTR(rpc_version,
+ 0444, rpc_sysfs_clnt_version_show, NULL);
+
+static struct kobj_attribute rpc_sysfs_clnt_program = __ATTR(program,
+ 0444, rpc_sysfs_clnt_program_show, NULL);
+
+static struct kobj_attribute rpc_sysfs_clnt_max_connect = __ATTR(max_connect,
+ 0444, rpc_sysfs_clnt_max_connect_show, NULL);
+
+static struct attribute *rpc_sysfs_rpc_clnt_attrs[] = {
+ &rpc_sysfs_clnt_version.attr,
+ &rpc_sysfs_clnt_program.attr,
+ &rpc_sysfs_clnt_max_connect.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(rpc_sysfs_rpc_clnt);
+
static struct kobj_attribute rpc_sysfs_xprt_dstaddr = __ATTR(dstaddr,
0644, rpc_sysfs_xprt_dstaddr_show, rpc_sysfs_xprt_dstaddr_store);
static struct kobj_attribute rpc_sysfs_xprt_srcaddr = __ATTR(srcaddr,
0644, rpc_sysfs_xprt_srcaddr_show, NULL);
+static struct kobj_attribute rpc_sysfs_xprt_xprtsec = __ATTR(xprtsec,
+ 0644, rpc_sysfs_xprt_xprtsec_show, NULL);
+
static struct kobj_attribute rpc_sysfs_xprt_info = __ATTR(xprt_info,
0444, rpc_sysfs_xprt_info_show, NULL);
static struct kobj_attribute rpc_sysfs_xprt_change_state = __ATTR(xprt_state,
0644, rpc_sysfs_xprt_state_show, rpc_sysfs_xprt_state_change);
+static struct kobj_attribute rpc_sysfs_xprt_del = __ATTR(del_xprt,
+ 0644, rpc_sysfs_xprt_del_xprt_show, rpc_sysfs_xprt_del_xprt);
+
static struct attribute *rpc_sysfs_xprt_attrs[] = {
&rpc_sysfs_xprt_dstaddr.attr,
&rpc_sysfs_xprt_srcaddr.attr,
+ &rpc_sysfs_xprt_xprtsec.attr,
&rpc_sysfs_xprt_info.attr,
&rpc_sysfs_xprt_change_state.attr,
+ &rpc_sysfs_xprt_del.attr,
NULL,
};
ATTRIBUTE_GROUPS(rpc_sysfs_xprt);
@@ -422,14 +618,20 @@ ATTRIBUTE_GROUPS(rpc_sysfs_xprt);
static struct kobj_attribute rpc_sysfs_xprt_switch_info =
__ATTR(xprt_switch_info, 0444, rpc_sysfs_xprt_switch_info_show, NULL);
+static struct kobj_attribute rpc_sysfs_xprt_switch_add_xprt =
+ __ATTR(add_xprt, 0644, rpc_sysfs_xprt_switch_add_xprt_show,
+ rpc_sysfs_xprt_switch_add_xprt_store);
+
static struct attribute *rpc_sysfs_xprt_switch_attrs[] = {
&rpc_sysfs_xprt_switch_info.attr,
+ &rpc_sysfs_xprt_switch_add_xprt.attr,
NULL,
};
ATTRIBUTE_GROUPS(rpc_sysfs_xprt_switch);
static const struct kobj_type rpc_sysfs_client_type = {
.release = rpc_sysfs_client_release,
+ .default_groups = rpc_sysfs_rpc_clnt_groups,
.sysfs_ops = &kobj_sysfs_ops,
.namespace = rpc_sysfs_client_namespace,
};
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 7e98d4dd9f10..4c5e08b0aa64 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -92,6 +92,27 @@ void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
xprt_put(xprt);
}
+/**
+ * rpc_xprt_switch_get_main_xprt - Get the 'main' xprt for an xprt switch.
+ * @xps: pointer to struct rpc_xprt_switch.
+ */
+struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps)
+{
+ struct rpc_xprt_iter xpi;
+ struct rpc_xprt *xprt;
+
+ xprt_iter_init_listall(&xpi, xps);
+
+ xprt = xprt_iter_get_next(&xpi);
+ while (xprt && !xprt->main) {
+ xprt_put(xprt);
+ xprt = xprt_iter_get_next(&xpi);
+ }
+
+ xprt_iter_destroy(&xpi);
+ return xprt;
+}
+
static DEFINE_IDA(rpc_xprtswitch_ids);
void xprt_multipath_cleanup_ids(void)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index e5d104ce7b82..5696af45bcf7 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -806,8 +806,11 @@ static int __xsk_generic_xmit(struct sock *sk)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- if (xsk_cq_reserve_addr_locked(xs->pool, desc.addr))
+ err = xsk_cq_reserve_addr_locked(xs->pool, desc.addr);
+ if (err) {
+ err = -EAGAIN;
goto out;
+ }
skb = xsk_build_skb(xs, &desc);
if (IS_ERR(skb)) {
diff --git a/rust/Makefile b/rust/Makefile
index b9cc810764e9..99bc3eea44a6 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -260,7 +260,8 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-mfunction-return=thunk-extern -mrecord-mcount -mabi=lp64 \
-mindirect-branch-cs-prefix -mstack-protector-guard% -mtraceback=no \
-mno-pointers-to-nested-functions -mno-string \
- -mno-strict-align -mstrict-align \
+ -mno-strict-align -mstrict-align -mdirect-extern-access \
+ -mexplicit-relocs -mno-check-zero-division \
-fconserve-stack -falign-jumps=% -falign-loops=% \
-femit-struct-debug-baseonly -fno-ipa-cp-clone -fno-ipa-sra \
-fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \
@@ -274,6 +275,8 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
# Derived from `scripts/Makefile.clang`.
BINDGEN_TARGET_x86 := x86_64-linux-gnu
BINDGEN_TARGET_arm64 := aarch64-linux-gnu
+BINDGEN_TARGET_arm := arm-linux-gnueabi
+BINDGEN_TARGET_loongarch := loongarch64-linux-gnusf
BINDGEN_TARGET_um := $(BINDGEN_TARGET_$(SUBARCH))
BINDGEN_TARGET := $(BINDGEN_TARGET_$(SRCARCH))
@@ -431,6 +434,13 @@ redirect-intrinsics = \
__muloti4 __multi3 \
__udivmodti4 __udivti3 __umodti3
+ifdef CONFIG_ARM
+ # Add EABI intrinsics for ARM 32-bit
+ redirect-intrinsics += \
+ __aeabi_fadd __aeabi_fmul __aeabi_fcmpeq __aeabi_fcmple __aeabi_fcmplt __aeabi_fcmpun \
+ __aeabi_dadd __aeabi_dmul __aeabi_dcmple __aeabi_dcmplt __aeabi_dcmpun \
+ __aeabi_uldivmod
+endif
ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),)
# These intrinsics are defined for ARM64 and RISCV64
redirect-intrinsics += \
diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
index f14b8d7caf89..dd16c1dc899c 100644
--- a/rust/compiler_builtins.rs
+++ b/rust/compiler_builtins.rs
@@ -73,5 +73,29 @@ define_panicking_intrinsics!("`u128` should not be used", {
__umodti3,
});
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`f32` should not be used", {
+ __aeabi_fadd,
+ __aeabi_fmul,
+ __aeabi_fcmpeq,
+ __aeabi_fcmple,
+ __aeabi_fcmplt,
+ __aeabi_fcmpun,
+});
+
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`f64` should not be used", {
+ __aeabi_dadd,
+ __aeabi_dmul,
+ __aeabi_dcmple,
+ __aeabi_dcmplt,
+ __aeabi_dcmpun,
+});
+
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`u64` division/modulo should not be used", {
+ __aeabi_uldivmod,
+});
+
// NOTE: if you are adding a new intrinsic here, you should also add it to
// `redirect-intrinsics` in `rust/Makefile`.
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 999f78d380ae..1a05fc153353 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -319,7 +319,8 @@ TRACE_EVENT(foo_bar,
__assign_cpumask(cpum, cpumask_bits(mask));
),
- TP_printk("foo %s %d %s %s %s %s %s %s (%s) (%s) %s", __entry->foo, __entry->bar,
+ TP_printk("foo %s %d %s %s %s %s %s %s (%s) (%s) %s [%d] %*pbl",
+ __entry->foo, __entry->bar,
/*
* Notice here the use of some helper functions. This includes:
@@ -370,7 +371,10 @@ TRACE_EVENT(foo_bar,
__get_str(str), __get_str(lstr),
__get_bitmask(cpus), __get_cpumask(cpum),
- __get_str(vstr))
+ __get_str(vstr),
+ __get_dynamic_array_len(cpus),
+ __get_dynamic_array_len(cpus),
+ __get_dynamic_array(cpus))
);
/*
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 57620b439a1f..4d543054f723 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -275,9 +275,9 @@ objtool-args-$(CONFIG_MITIGATION_SLS) += --sls
objtool-args-$(CONFIG_STACK_VALIDATION) += --stackval
objtool-args-$(CONFIG_HAVE_STATIC_CALL_INLINE) += --static-call
objtool-args-$(CONFIG_HAVE_UACCESS_VALIDATION) += --uaccess
-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable
+objtool-args-$(or $(CONFIG_GCOV_KERNEL),$(CONFIG_KCOV)) += --no-unreachable
objtool-args-$(CONFIG_PREFIX_SYMBOLS) += --prefix=$(CONFIG_FUNCTION_PADDING_BYTES)
-objtool-args-$(CONFIG_OBJTOOL_WERROR) += --Werror --backtrace
+objtool-args-$(CONFIG_OBJTOOL_WERROR) += --Werror
objtool-args = $(objtool-args-y) \
$(if $(delay-objtool), --link) \
diff --git a/scripts/Makefile.vmlinux_o b/scripts/Makefile.vmlinux_o
index 0b6e2ebf60dc..938c7457717e 100644
--- a/scripts/Makefile.vmlinux_o
+++ b/scripts/Makefile.vmlinux_o
@@ -30,13 +30,20 @@ endif
# objtool for vmlinux.o
# ---------------------------------------------------------------------------
#
-# For LTO and IBT, objtool doesn't run on individual translation units.
-# Run everything on vmlinux instead.
+# For delay-objtool (IBT or LTO), objtool doesn't run on individual translation
+# units. Instead it runs on vmlinux.o.
+#
+# For !delay-objtool + CONFIG_NOINSTR_VALIDATION, it runs on both translation
+# units and vmlinux.o, with the latter only used for noinstr/unret validation.
objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
-vmlinux-objtool-args-$(delay-objtool) += $(objtool-args-y)
-vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable
+ifeq ($(delay-objtool),y)
+vmlinux-objtool-args-y += $(objtool-args-y)
+else
+vmlinux-objtool-args-$(CONFIG_OBJTOOL_WERROR) += --Werror
+endif
+
vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \
$(if $(or $(CONFIG_MITIGATION_UNRET_ENTRY),$(CONFIG_MITIGATION_SRSO)), --unret)
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
index 4fd6b6ab3e32..8667d0ae3c82 100644
--- a/scripts/generate_rust_target.rs
+++ b/scripts/generate_rust_target.rs
@@ -184,7 +184,9 @@ fn main() {
let mut ts = TargetSpec::new();
// `llvm-target`s are taken from `scripts/Makefile.clang`.
- if cfg.has("ARM64") {
+ if cfg.has("ARM") {
+ panic!("arm uses the builtin rustc target");
+ } else if cfg.has("ARM64") {
panic!("arm64 uses the builtin rustc aarch64-unknown-none target");
} else if cfg.has("RISCV") {
if cfg.has("64BIT") {
diff --git a/scripts/sorttable.c b/scripts/sorttable.c
index 7b4b3714b1af..deed676bfe38 100644
--- a/scripts/sorttable.c
+++ b/scripts/sorttable.c
@@ -857,7 +857,7 @@ static void *sort_mcount_loc(void *arg)
for (void *ptr = vals; ptr < vals + size; ptr += long_size) {
uint64_t key;
- key = long_size == 4 ? r((uint32_t *)ptr) : r8((uint64_t *)ptr);
+ key = long_size == 4 ? *(uint32_t *)ptr : *(uint64_t *)ptr;
if (!find_func(key)) {
if (long_size == 4)
*(uint32_t *)ptr = 0;
diff --git a/security/Kconfig b/security/Kconfig
index 536061cf33a9..4816fc74f81e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -51,6 +51,27 @@ config PROC_MEM_NO_FORCE
endchoice
+config MSEAL_SYSTEM_MAPPINGS
+ bool "mseal system mappings"
+ depends on 64BIT
+ depends on ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
+ depends on !CHECKPOINT_RESTORE
+ help
+ Apply mseal on system mappings.
+ The system mappings include vdso, vvar, vvar_vclock,
+ vectors (arm compat-mode), sigpage (arm compat-mode), uprobes.
+
+ A 64-bit kernel is required for the memory sealing feature.
+ No specific hardware features from the CPU are needed.
+
+ WARNING: This feature breaks programs which rely on relocating
+ or unmapping system mappings. Known broken software at the time
+ of writing includes CHECKPOINT_RESTORE, UML, gVisor, rr. Therefore
+ this config can't be enabled universally.
+
+ For complete descriptions of memory sealing, please see
+ Documentation/userspace-api/mseal.rst
+
config SECURITY
bool "Enable different security models"
depends on SYSFS
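The help text above refers to the mseal() system call; sealing a mapping makes later munmap()/mprotect()/mremap() on it fail, and MSEAL_SYSTEM_MAPPINGS applies that to the kernel-installed mappings it lists. A hedged userspace sketch of the effect, assuming headers that define __NR_mseal (illustrative only):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Seal the mapping; flags must currently be 0. */
	if (syscall(__NR_mseal, p, len, 0) != 0)
		perror("mseal");

	/* Expected to fail with EPERM once the mapping is sealed. */
	if (munmap(p, len) != 0)
		perror("munmap");

	return 0;
}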
diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
index 49d3e0e30073..8686adaf4531 100644
--- a/sound/hda/intel-sdw-acpi.c
+++ b/sound/hda/intel-sdw-acpi.c
@@ -11,8 +11,8 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/export.h>
-#include <linux/fwnode.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/soundwire/sdw_intel.h>
#include <linux/string.h>
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b4fe681ec3cb..79004bc8107b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4743,6 +4743,22 @@ static void alc245_fixup_hp_mute_led_coefbit(struct hda_codec *codec,
}
}
+static void alc245_fixup_hp_mute_led_v1_coefbit(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->mute_led_polarity = 0;
+ spec->mute_led_coef.idx = 0x0b;
+ spec->mute_led_coef.mask = 1 << 3;
+ spec->mute_led_coef.on = 1 << 3;
+ spec->mute_led_coef.off = 0;
+ snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set);
+ }
+}
+
/* turn on/off mic-mute LED per capture hook by coef bit */
static int coef_micmute_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
@@ -7574,6 +7590,24 @@ static void alc245_fixup_hp_spectre_x360_16_aa0xxx(struct hda_codec *codec,
alc245_fixup_hp_gpio_led(codec, fix, action);
}
+static void alc245_fixup_hp_zbook_firefly_g12a(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ static const hda_nid_t conn[] = { 0x02 };
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ spec->gen.auto_mute_via_amp = 1;
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ break;
+ }
+
+ cs35l41_fixup_i2c_two(codec, fix, action);
+ alc245_fixup_hp_mute_led_coefbit(codec, fix, action);
+ alc285_fixup_hp_coef_micmute_led(codec, fix, action);
+}
+
/*
* ALC287 PCM hooks
*/
@@ -7911,6 +7945,7 @@ enum {
ALC245_FIXUP_TAS2781_SPI_2,
ALC287_FIXUP_YOGA7_14ARB7_I2C,
ALC245_FIXUP_HP_MUTE_LED_COEFBIT,
+ ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT,
ALC245_FIXUP_HP_X360_MUTE_LEDS,
ALC287_FIXUP_THINKPAD_I2S_SPK,
ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
@@ -7921,6 +7956,7 @@ enum {
ALC256_FIXUP_HEADPHONE_AMP_VOL,
ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX,
ALC245_FIXUP_HP_SPECTRE_X360_16_AA0XXX,
+ ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A,
ALC285_FIXUP_ASUS_GA403U,
ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC,
ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1,
@@ -10164,6 +10200,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc245_fixup_hp_mute_led_coefbit,
},
+ [ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc245_fixup_hp_mute_led_v1_coefbit,
+ },
[ALC245_FIXUP_HP_X360_MUTE_LEDS] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc245_fixup_hp_mute_led_coefbit,
@@ -10212,6 +10252,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc245_fixup_hp_spectre_x360_16_aa0xxx,
},
+ [ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc245_fixup_hp_zbook_firefly_g12a,
+ },
[ALC285_FIXUP_ASUS_GA403U] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_asus_ga403u,
@@ -10658,6 +10702,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10751,15 +10796,15 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8e11, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8e12, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8e13, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x103c, 0x8e14, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e15, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e16, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e17, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e1b, "HP EliteBook G12", ALC285_FIXUP_HP_GPIO_LED),
- SND_PCI_QUIRK(0x103c, 0x8e1c, "HP EliteBook G12", ALC285_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8e14, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e15, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e16, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e17, "HP ZBook Firefly 14 G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e1b, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
+ SND_PCI_QUIRK(0x103c, 0x8e1c, "HP EliteBook G12", ALC245_FIXUP_HP_ZBOOK_FIREFLY_G12A),
SND_PCI_QUIRK(0x103c, 0x8e2c, "HP EliteBook 16 G12", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8e36, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8e37, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10804,6 +10849,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601VV/VU/VJ/VQ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x14d3, "ASUS G614JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x14e3, "ASUS G513PI/PU/PV", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x14f2, "ASUS VivoBook X515JA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1503, "ASUS G733PY/PZ/PZV/PYV", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x1533, "ASUS GV302XA/XJ/XQ/XU/XV/XI", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10843,6 +10889,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1c43, "ASUS UX8406MA", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1c63, "ASUS GU605M", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
+ SND_PCI_QUIRK(0x1043, 0x1c80, "ASUS VivoBook TP401", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
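Note: the patch_realtek.c hunks above all follow the same three-part pattern: a new fixup enum value, an alc269_fixups[] entry binding it to a handler, and SND_PCI_QUIRK() rows that select it by PCI subsystem ID. A minimal sketch of that shape follows; the names and IDs are purely illustrative, it is not the actual HP/ASUS handler, and it assumes the in-tree HDA headers used by patch_realtek.c.

    /* Assumes the internal HDA headers (e.g. hda_local.h, hda_auto_parser.h). */

    enum { EX_FIXUP_MUTE_LED };

    static void ex_fixup_mute_led(struct hda_codec *codec,
                                  const struct hda_fixup *fix, int action)
    {
            /* program the mute-LED coefficient, typically on PRE_PROBE */
    }

    static const struct hda_fixup ex_fixups[] = {
            [EX_FIXUP_MUTE_LED] = {
                    .type = HDA_FIXUP_FUNC,
                    .v.func = ex_fixup_mute_led,
            },
    };

    static const struct hda_quirk ex_fixup_tbl[] = {
            SND_PCI_QUIRK(0x1234, 0x5678, "Example vendor", EX_FIXUP_MUTE_LED),
            {}
    };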
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 9ed49b0dbe6b..29dc4f500580 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -558,28 +558,38 @@ static int tas2563_save_calibration(struct tasdevice_priv *tas_priv)
static void tas2781_apply_calib(struct tasdevice_priv *tas_priv)
{
- static const unsigned char page_array[CALIB_MAX] = {
- 0x17, 0x18, 0x18, 0x13, 0x18,
+ struct calidata *cali_data = &tas_priv->cali_data;
+ struct cali_reg *r = &cali_data->cali_reg_array;
+ unsigned int cali_reg[CALIB_MAX] = {
+ TASDEVICE_REG(0, 0x17, 0x74),
+ TASDEVICE_REG(0, 0x18, 0x0c),
+ TASDEVICE_REG(0, 0x18, 0x14),
+ TASDEVICE_REG(0, 0x13, 0x70),
+ TASDEVICE_REG(0, 0x18, 0x7c),
};
- static const unsigned char rgno_array[CALIB_MAX] = {
- 0x74, 0x0c, 0x14, 0x70, 0x7c,
- };
- int offset = 0;
int i, j, rc;
+ int oft = 0;
__be32 data;
+ if (tas_priv->dspbin_typ != TASDEV_BASIC) {
+ cali_reg[0] = r->r0_reg;
+ cali_reg[1] = r->invr0_reg;
+ cali_reg[2] = r->r0_low_reg;
+ cali_reg[3] = r->pow_reg;
+ cali_reg[4] = r->tlimit_reg;
+ }
+
for (i = 0; i < tas_priv->ndev; i++) {
for (j = 0; j < CALIB_MAX; j++) {
data = cpu_to_be32(
- *(uint32_t *)&tas_priv->cali_data.data[offset]);
+ *(uint32_t *)&tas_priv->cali_data.data[oft]);
rc = tasdevice_dev_bulk_write(tas_priv, i,
- TASDEVICE_REG(0, page_array[j], rgno_array[j]),
- (unsigned char *)&data, 4);
+ cali_reg[j], (unsigned char *)&data, 4);
if (rc < 0)
dev_err(tas_priv->dev,
"chn %d calib %d bulk_wr err = %d\n",
i, j, rc);
- offset += 4;
+ oft += 4;
}
}
}
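Note on the tas2781_apply_calib() change above: the two parallel page/register arrays are folded into one array of pre-packed register addresses built with TASDEVICE_REG(), which can then be replaced wholesale from the calibration data when a non-basic DSP firmware is in use. A toy illustration of the idea follows; the bit layout is an assumption, the real TASDEVICE_REG() in include/sound/tas2781.h may pack book/page/register differently.

    /* Illustrative packing only; not the kernel's actual encoding. */
    #define EX_REG(book, page, reg) (((book) << 15) | ((page) << 7) | (reg))

    static unsigned int ex_cali_reg[5] = {
            EX_REG(0, 0x17, 0x74),  /* defaults for the basic firmware */
            EX_REG(0, 0x18, 0x0c),
            EX_REG(0, 0x18, 0x14),
            EX_REG(0, 0x13, 0x70),
            EX_REG(0, 0x18, 0x7c),
    };

    /* With a profile-specific calibration bin, the defaults are replaced: */
    static void ex_override(unsigned int r0, unsigned int invr0,
                            unsigned int r0_low, unsigned int pow,
                            unsigned int tlimit)
    {
            ex_cali_reg[0] = r0;
            ex_cali_reg[1] = invr0;
            ex_cali_reg[2] = r0_low;
            ex_cali_reg[3] = pow;
            ex_cali_reg[4] = tlimit;
    }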
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index e0d1991cffdb..bcb6d7c6f301 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -31,9 +31,7 @@
#include "rl6231.h"
#include "rt5665.h"
-#define RT5665_NUM_SUPPLIES 3
-
-static const char *rt5665_supply_names[RT5665_NUM_SUPPLIES] = {
+static const char * const rt5665_supply_names[] = {
"AVDD",
"MICVDD",
"VBAT",
@@ -46,7 +44,6 @@ struct rt5665_priv {
struct gpio_desc *gpiod_ldo1_en;
struct gpio_desc *gpiod_reset;
struct snd_soc_jack *hs_jack;
- struct regulator_bulk_data supplies[RT5665_NUM_SUPPLIES];
struct delayed_work jack_detect_work;
struct delayed_work calibrate_work;
struct delayed_work jd_check_work;
@@ -4471,8 +4468,6 @@ static void rt5665_remove(struct snd_soc_component *component)
struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
regmap_write(rt5665->regmap, RT5665_RESET, 0);
-
- regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
}
#ifdef CONFIG_PM
@@ -4758,7 +4753,7 @@ static int rt5665_i2c_probe(struct i2c_client *i2c)
{
struct rt5665_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct rt5665_priv *rt5665;
- int i, ret;
+ int ret;
unsigned int val;
rt5665 = devm_kzalloc(&i2c->dev, sizeof(struct rt5665_priv),
@@ -4774,24 +4769,13 @@ static int rt5665_i2c_probe(struct i2c_client *i2c)
else
rt5665_parse_dt(rt5665, &i2c->dev);
- for (i = 0; i < ARRAY_SIZE(rt5665->supplies); i++)
- rt5665->supplies[i].supply = rt5665_supply_names[i];
-
- ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5665->supplies),
- rt5665->supplies);
+ ret = devm_regulator_bulk_get_enable(&i2c->dev, ARRAY_SIZE(rt5665_supply_names),
+ rt5665_supply_names);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
return ret;
}
- ret = regulator_bulk_enable(ARRAY_SIZE(rt5665->supplies),
- rt5665->supplies);
- if (ret != 0) {
- dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
- return ret;
- }
-
-
rt5665->gpiod_ldo1_en = devm_gpiod_get_optional(&i2c->dev,
"realtek,ldo1-en",
GPIOD_OUT_HIGH);
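Note on the rt5665 conversion above: devm_regulator_bulk_get_enable() acquires and enables a list of supplies in one call and undoes both automatically on driver detach, which is what lets the supplies[] array, the manual regulator_bulk_enable() and the disable in remove() all go away. A minimal sketch of the pattern, with illustrative names rather than the rt5665 driver itself:

    #include <linux/device.h>
    #include <linux/i2c.h>
    #include <linux/kernel.h>
    #include <linux/regulator/consumer.h>

    static const char * const ex_supply_names[] = { "AVDD", "MICVDD", "VBAT" };

    static int ex_i2c_probe(struct i2c_client *i2c)
    {
            int ret;

            /* Acquired and enabled here; disabled/released on detach. */
            ret = devm_regulator_bulk_get_enable(&i2c->dev,
                                                 ARRAY_SIZE(ex_supply_names),
                                                 ex_supply_names);
            if (ret)
                    dev_err(&i2c->dev, "Failed to get/enable supplies: %d\n", ret);

            return ret;
    }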
diff --git a/sound/soc/codecs/sma1307.c b/sound/soc/codecs/sma1307.c
index f5c303d4bb62..498189ab691c 100644
--- a/sound/soc/codecs/sma1307.c
+++ b/sound/soc/codecs/sma1307.c
@@ -1705,7 +1705,7 @@ static void sma1307_check_fault_worker(struct work_struct *work)
static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *file)
{
const struct firmware *fw;
- int *data, size, offset, num_mode;
+ int size, offset, num_mode;
int ret;
ret = request_firmware(&fw, file, sma1307->dev);
@@ -1722,7 +1722,7 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil
return;
}
- data = kzalloc(fw->size, GFP_KERNEL);
+ int *data __free(kfree) = kzalloc(fw->size, GFP_KERNEL);
if (!data) {
release_firmware(fw);
sma1307->set.status = false;
@@ -1742,7 +1742,6 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil
sma1307->set.header_size,
GFP_KERNEL);
if (!sma1307->set.header) {
- kfree(data);
sma1307->set.status = false;
return;
}
@@ -1763,8 +1762,6 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil
= devm_kzalloc(sma1307->dev,
sma1307->set.def_size * sizeof(int), GFP_KERNEL);
if (!sma1307->set.def) {
- kfree(data);
- kfree(sma1307->set.header);
sma1307->set.status = false;
return;
}
@@ -1782,9 +1779,6 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil
sma1307->set.mode_size * 2 * sizeof(int),
GFP_KERNEL);
if (!sma1307->set.mode_set[i]) {
- kfree(data);
- kfree(sma1307->set.header);
- kfree(sma1307->set.def);
for (int j = 0; j < i; j++)
kfree(sma1307->set.mode_set[j]);
sma1307->set.status = false;
@@ -1799,7 +1793,6 @@ static void sma1307_setting_loaded(struct sma1307_priv *sma1307, const char *fil
}
}
- kfree(data);
sma1307->set.status = true;
}
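Note on the sma1307 change above: the firmware parse buffer is now declared with the scope-based __free(kfree) cleanup attribute from <linux/cleanup.h>, so every early-return path frees it automatically and the explicit kfree(data) calls can be dropped. A small sketch of the idiom, using hypothetical names:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int ex_parse_blob(const void *src, size_t size)
    {
            /* Freed automatically when 'buf' goes out of scope. */
            void *buf __free(kfree) = kzalloc(size, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;

            memcpy(buf, src, size);
            /* ... parse 'buf', return early on errors without kfree() ... */
            return 0;
    }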
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index dd0cda394bf1..fa69817c97ea 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -2263,7 +2263,7 @@ static irqreturn_t wcd934x_slim_irq_handler(int irq, void *data)
{
struct wcd934x_codec *wcd = data;
unsigned long status = 0;
- int i, j, port_id;
+ unsigned int i, j, port_id;
unsigned int val, int_val = 0;
irqreturn_t ret = IRQ_NONE;
bool tx;
diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
index d259e1d4d83d..1c9df7c061bd 100644
--- a/sound/soc/codecs/wsa883x.c
+++ b/sound/soc/codecs/wsa883x.c
@@ -568,7 +568,7 @@ static const struct sdw_port_config wsa883x_pconfig[WSA883X_MAX_SWR_PORTS] = {
},
[WSA883X_PORT_VISENSE] = {
.num = WSA883X_PORT_VISENSE + 1,
- .ch_mask = 0x3,
+ .ch_mask = 0x1,
},
};
diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c
index 8051483aa1ac..daada1a2a34c 100644
--- a/sound/soc/codecs/wsa884x.c
+++ b/sound/soc/codecs/wsa884x.c
@@ -891,7 +891,7 @@ static const struct sdw_port_config wsa884x_pconfig[WSA884X_MAX_SWR_PORTS] = {
},
[WSA884X_PORT_VISENSE] = {
.num = WSA884X_PORT_VISENSE + 1,
- .ch_mask = 0x3,
+ .ch_mask = 0x1,
},
[WSA884X_PORT_CPS] = {
.num = WSA884X_PORT_CPS + 1,
diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c
index 905294682996..3686d468506b 100644
--- a/sound/soc/fsl/imx-card.c
+++ b/sound/soc/fsl/imx-card.c
@@ -772,6 +772,8 @@ static int imx_card_probe(struct platform_device *pdev)
data->dapm_routes[i].sink =
devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s",
i + 1, "Playback");
+ if (!data->dapm_routes[i].sink)
+ return -ENOMEM;
data->dapm_routes[i].source = "CPU-Playback";
}
}
@@ -789,6 +791,8 @@ static int imx_card_probe(struct platform_device *pdev)
data->dapm_routes[i].source =
devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s",
i + 1, "Capture");
+ if (!data->dapm_routes[i].source)
+ return -ENOMEM;
data->dapm_routes[i].sink = "CPU-Capture";
}
}
diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
index c9404b5934c7..2cd522108221 100644
--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
@@ -24,8 +24,8 @@
#define PLAYBACK_MIN_PERIOD_SIZE 128
#define CAPTURE_MIN_NUM_PERIODS 2
#define CAPTURE_MAX_NUM_PERIODS 8
-#define CAPTURE_MAX_PERIOD_SIZE 4096
-#define CAPTURE_MIN_PERIOD_SIZE 320
+#define CAPTURE_MAX_PERIOD_SIZE 65536
+#define CAPTURE_MIN_PERIOD_SIZE 6144
#define BUFFER_BYTES_MAX (PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE)
#define BUFFER_BYTES_MIN (PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE)
#define COMPR_PLAYBACK_MAX_FRAGMENT_SIZE (128 * 1024)
@@ -64,12 +64,12 @@ struct q6apm_dai_rtd {
phys_addr_t phys;
unsigned int pcm_size;
unsigned int pcm_count;
- unsigned int pos; /* Buffer position */
unsigned int periods;
unsigned int bytes_sent;
unsigned int bytes_received;
unsigned int copied_total;
uint16_t bits_per_sample;
+ snd_pcm_uframes_t queue_ptr;
bool next_track;
enum stream_state state;
struct q6apm_graph *graph;
@@ -123,25 +123,16 @@ static void event_handler(uint32_t opcode, uint32_t token, void *payload, void *
{
struct q6apm_dai_rtd *prtd = priv;
struct snd_pcm_substream *substream = prtd->substream;
- unsigned long flags;
switch (opcode) {
case APM_CLIENT_EVENT_CMD_EOS_DONE:
prtd->state = Q6APM_STREAM_STOPPED;
break;
case APM_CLIENT_EVENT_DATA_WRITE_DONE:
- spin_lock_irqsave(&prtd->lock, flags);
- prtd->pos += prtd->pcm_count;
- spin_unlock_irqrestore(&prtd->lock, flags);
snd_pcm_period_elapsed(substream);
- if (prtd->state == Q6APM_STREAM_RUNNING)
- q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
break;
case APM_CLIENT_EVENT_DATA_READ_DONE:
- spin_lock_irqsave(&prtd->lock, flags);
- prtd->pos += prtd->pcm_count;
- spin_unlock_irqrestore(&prtd->lock, flags);
snd_pcm_period_elapsed(substream);
if (prtd->state == Q6APM_STREAM_RUNNING)
q6apm_read(prtd->graph);
@@ -248,7 +239,6 @@ static int q6apm_dai_prepare(struct snd_soc_component *component,
}
prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
- prtd->pos = 0;
/* rate and channels are sent to audio driver */
ret = q6apm_graph_media_format_shmem(prtd->graph, &cfg);
if (ret < 0) {
@@ -294,6 +284,27 @@ static int q6apm_dai_prepare(struct snd_soc_component *component,
return 0;
}
+static int q6apm_dai_ack(struct snd_soc_component *component, struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct q6apm_dai_rtd *prtd = runtime->private_data;
+ int i, ret = 0, avail_periods;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ avail_periods = (runtime->control->appl_ptr - prtd->queue_ptr)/runtime->period_size;
+ for (i = 0; i < avail_periods; i++) {
+ ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+ if (ret < 0) {
+ dev_err(component->dev, "Error queuing playback buffer %d\n", ret);
+ return ret;
+ }
+ prtd->queue_ptr += runtime->period_size;
+ }
+ }
+
+ return ret;
+}
+
static int q6apm_dai_trigger(struct snd_soc_component *component,
struct snd_pcm_substream *substream, int cmd)
{
@@ -305,9 +316,6 @@ static int q6apm_dai_trigger(struct snd_soc_component *component,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- /* start writing buffers for playback only as we already queued capture buffers */
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
break;
case SNDRV_PCM_TRIGGER_STOP:
/* TODO support be handled via SoftPause Module */
@@ -377,13 +385,14 @@ static int q6apm_dai_open(struct snd_soc_component *component,
}
}
- ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
+ /* setup 10ms latency to accommodate DSP restrictions */
+ ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 480);
if (ret < 0) {
dev_err(dev, "constraint for period bytes step ret = %d\n", ret);
goto err;
}
- ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
+ ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 480);
if (ret < 0) {
dev_err(dev, "constraint for buffer bytes step ret = %d\n", ret);
goto err;
@@ -428,16 +437,12 @@ static snd_pcm_uframes_t q6apm_dai_pointer(struct snd_soc_component *component,
struct snd_pcm_runtime *runtime = substream->runtime;
struct q6apm_dai_rtd *prtd = runtime->private_data;
snd_pcm_uframes_t ptr;
- unsigned long flags;
- spin_lock_irqsave(&prtd->lock, flags);
- if (prtd->pos == prtd->pcm_size)
- prtd->pos = 0;
-
- ptr = bytes_to_frames(runtime, prtd->pos);
- spin_unlock_irqrestore(&prtd->lock, flags);
+ ptr = q6apm_get_hw_pointer(prtd->graph, substream->stream) * runtime->period_size;
+ if (ptr)
+ return ptr - 1;
- return ptr;
+ return 0;
}
static int q6apm_dai_hw_params(struct snd_soc_component *component,
@@ -652,8 +657,6 @@ static int q6apm_dai_compr_set_params(struct snd_soc_component *component,
prtd->pcm_size = runtime->fragments * runtime->fragment_size;
prtd->bits_per_sample = 16;
- prtd->pos = 0;
-
if (prtd->next_track != true) {
memcpy(&prtd->codec, codec, sizeof(*codec));
@@ -836,6 +839,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = {
.hw_params = q6apm_dai_hw_params,
.pointer = q6apm_dai_pointer,
.trigger = q6apm_dai_trigger,
+ .ack = q6apm_dai_ack,
.compress_ops = &q6apm_dai_compress_ops,
.use_dai_pcm_id = true,
};
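Note on the q6apm-dai.c rework above: instead of blindly re-queueing a buffer from the write-done interrupt, the driver now implements the ALSA .ack callback and queues exactly as many periods as userspace has made available (appl_ptr minus what was already queued). A simplified model of that bookkeeping, with assumed names:

    /* Returns how many periods were handed to the DSP this time. */
    static int ex_queue_new_periods(unsigned long appl_ptr,
                                    unsigned long *queued_ptr,
                                    unsigned long period_size)
    {
            int queued = 0;

            while (appl_ptr - *queued_ptr >= period_size) {
                    /* real driver: q6apm_write_async(graph, period_bytes,
                     *                                0, 0, NO_TIMESTAMP); */
                    *queued_ptr += period_size;
                    queued++;
            }

            return queued;
    }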
diff --git a/sound/soc/qcom/qdsp6/q6apm.c b/sound/soc/qcom/qdsp6/q6apm.c
index 11e252a70f69..b4ffa0f0b188 100644
--- a/sound/soc/qcom/qdsp6/q6apm.c
+++ b/sound/soc/qcom/qdsp6/q6apm.c
@@ -494,6 +494,19 @@ int q6apm_read(struct q6apm_graph *graph)
}
EXPORT_SYMBOL_GPL(q6apm_read);
+int q6apm_get_hw_pointer(struct q6apm_graph *graph, int dir)
+{
+ struct audioreach_graph_data *data;
+
+ if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+ data = &graph->rx_data;
+ else
+ data = &graph->tx_data;
+
+ return (int)atomic_read(&data->hw_ptr);
+}
+EXPORT_SYMBOL_GPL(q6apm_get_hw_pointer);
+
static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
{
struct data_cmd_rsp_rd_sh_mem_ep_data_buffer_done_v2 *rd_done;
@@ -520,7 +533,8 @@ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
done = data->payload;
phys = graph->rx_data.buf[token].phys;
mutex_unlock(&graph->lock);
-
+ /* token numbering starts at 0 */
+ atomic_set(&graph->rx_data.hw_ptr, token + 1);
if (lower_32_bits(phys) == done->buf_addr_lsw &&
upper_32_bits(phys) == done->buf_addr_msw) {
graph->result.opcode = hdr->opcode;
@@ -553,6 +567,8 @@ static int graph_callback(struct gpr_resp_pkt *data, void *priv, int op)
rd_done = data->payload;
phys = graph->tx_data.buf[hdr->token].phys;
mutex_unlock(&graph->lock);
+ /* token numbering starts at 0 */
+ atomic_set(&graph->tx_data.hw_ptr, hdr->token + 1);
if (upper_32_bits(phys) == rd_done->buf_addr_msw &&
lower_32_bits(phys) == rd_done->buf_addr_lsw) {
diff --git a/sound/soc/qcom/qdsp6/q6apm.h b/sound/soc/qcom/qdsp6/q6apm.h
index c248c8d2b1ab..7ce08b401e31 100644
--- a/sound/soc/qcom/qdsp6/q6apm.h
+++ b/sound/soc/qcom/qdsp6/q6apm.h
@@ -2,6 +2,7 @@
#ifndef __Q6APM_H__
#define __Q6APM_H__
#include <linux/types.h>
+#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/kernel.h>
@@ -77,6 +78,7 @@ struct audioreach_graph_data {
uint32_t num_periods;
uint32_t dsp_buf;
uint32_t mem_map_handle;
+ atomic_t hw_ptr;
};
struct audioreach_graph {
@@ -150,4 +152,5 @@ int q6apm_enable_compress_module(struct device *dev, struct q6apm_graph *graph,
int q6apm_remove_initial_silence(struct device *dev, struct q6apm_graph *graph, uint32_t samples);
int q6apm_remove_trailing_silence(struct device *dev, struct q6apm_graph *graph, uint32_t samples);
int q6apm_set_real_module_id(struct device *dev, struct q6apm_graph *graph, uint32_t codec_id);
+int q6apm_get_hw_pointer(struct q6apm_graph *graph, int dir);
#endif /* __APM_GRAPH_ */
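Note on the new hardware-pointer plumbing in q6apm.c/q6apm.h above: each buffer-done message from the DSP carries a token, the callback stores token + 1 in the per-direction atomic hw_ptr, and the PCM .pointer callback scales that count by the period size. A hedged, stand-alone approximation:

    #include <linux/atomic.h>

    static atomic_t ex_hw_ptr = ATOMIC_INIT(0);

    /* Called from the DSP completion path; tokens are numbered from 0. */
    static void ex_buffer_done(int token)
    {
            atomic_set(&ex_hw_ptr, token + 1);
    }

    /* Called from the ALSA .pointer callback. */
    static unsigned long ex_pointer(unsigned long period_size_frames)
    {
            unsigned long ptr = atomic_read(&ex_hw_ptr) * period_size_frames;

            return ptr ? ptr - 1 : 0;
    }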
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 045100c94352..a400c9a31fea 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -892,9 +892,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
if (ret < 0) {
dev_err(dev, "q6asm_open_write failed\n");
- q6asm_audio_client_free(prtd->audio_client);
- prtd->audio_client = NULL;
- return ret;
+ goto open_err;
}
}
@@ -903,7 +901,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
prtd->session_id, dir);
if (ret) {
dev_err(dev, "Stream reg failed ret:%d\n", ret);
- return ret;
+ goto q6_err;
}
ret = __q6asm_dai_compr_set_codec_params(component, stream,
@@ -911,7 +909,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
prtd->stream_id);
if (ret) {
dev_err(dev, "codec param setup failed ret:%d\n", ret);
- return ret;
+ goto q6_err;
}
ret = q6asm_map_memory_regions(dir, prtd->audio_client, prtd->phys,
@@ -920,12 +918,21 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
if (ret < 0) {
dev_err(dev, "Buffer Mapping failed ret:%d\n", ret);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto q6_err;
}
prtd->state = Q6ASM_STREAM_RUNNING;
return 0;
+
+q6_err:
+ q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE);
+
+open_err:
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ return ret;
}
static int q6asm_dai_compr_set_metadata(struct snd_soc_component *component,
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index ccf8eefdca70..f64e8a6a9a33 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -991,6 +991,10 @@ int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
if (!sdev->dspless_mode_selected) {
/* cancel any attempt for DSP D0I3 */
cancel_delayed_work_sync(&hda->d0i3_work);
+
+ /* Cancel the microphone privacy work if mic privacy is active */
+ if (hda->mic_privacy.active)
+ cancel_work_sync(&hda->mic_privacy.work);
}
/* stop hda controller and power dsp off */
@@ -1017,6 +1021,10 @@ int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
if (!sdev->dspless_mode_selected) {
/* cancel any attempt for DSP D0I3 */
cancel_delayed_work_sync(&hda->d0i3_work);
+
+ /* Cancel the microphone privacy work if mic privacy is active */
+ if (hda->mic_privacy.active)
+ cancel_work_sync(&hda->mic_privacy.work);
}
if (target_state == SOF_DSP_PM_D0) {
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 6b1ada566476..b34e5fdf10f1 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -968,6 +968,10 @@ void hda_dsp_remove(struct snd_sof_dev *sdev)
if (sdev->dspless_mode_selected)
goto skip_disable_dsp;
+ /* Cancel the microphone privacy work if mic privacy is active */
+ if (hda->mic_privacy.active)
+ cancel_work_sync(&hda->mic_privacy.work);
+
/* no need to check for error as the DSP will be disabled anyway */
if (chip && chip->power_down_dsp)
chip->power_down_dsp(sdev);
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index 76154627fc17..108cad04879e 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -487,6 +487,11 @@ enum sof_hda_D0_substate {
SOF_HDA_DSP_PM_D0I3, /* low power D0 substate */
};
+struct sof_ace3_mic_privacy {
+ bool active;
+ struct work_struct work;
+};
+
/* represents DSP HDA controller frontend - i.e. host facing control */
struct sof_intel_hda_dev {
bool imrboot_supported;
@@ -542,6 +547,9 @@ struct sof_intel_hda_dev {
/* Intel NHLT information */
struct nhlt_acpi_table *nhlt;
+ /* work queue for mic privacy state change notification sending */
+ struct sof_ace3_mic_privacy mic_privacy;
+
/*
* Pointing to the IPC message if immediate sending was not possible
* because the downlink communication channel was BUSY at the time.
diff --git a/sound/soc/sof/intel/ptl.c b/sound/soc/sof/intel/ptl.c
index 8fa4bdceedd9..aa0b772178bc 100644
--- a/sound/soc/sof/intel/ptl.c
+++ b/sound/soc/sof/intel/ptl.c
@@ -27,22 +27,44 @@ static bool sof_ptl_check_mic_privacy_irq(struct snd_sof_dev *sdev, bool alt,
return hdac_bus_eml_is_mic_privacy_changed(sof_to_bus(sdev), alt, elid);
}
+static void sof_ptl_mic_privacy_work(struct work_struct *work)
+{
+ struct sof_intel_hda_dev *hdev = container_of(work,
+ struct sof_intel_hda_dev,
+ mic_privacy.work);
+ struct hdac_bus *bus = &hdev->hbus.core;
+ struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
+ bool state;
+
+ /*
+ * The microphone privacy state is only available via Soundwire shim
+ * in PTL.
+ * The work is only scheduled on change.
+ */
+ state = hdac_bus_eml_get_mic_privacy_state(bus, 1,
+ AZX_REG_ML_LEPTR_ID_SDW);
+ sof_ipc4_mic_privacy_state_change(sdev, state);
+}
+
static void sof_ptl_process_mic_privacy(struct snd_sof_dev *sdev, bool alt,
int elid)
{
- bool state;
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
if (!alt || elid != AZX_REG_ML_LEPTR_ID_SDW)
return;
- state = hdac_bus_eml_get_mic_privacy_state(sof_to_bus(sdev), alt, elid);
-
- sof_ipc4_mic_privacy_state_change(sdev, state);
+ /*
+ * Schedule the work to read the microphone privacy state and send an
+ * IPC message about the new state to the firmware.
+ */
+ schedule_work(&hdev->mic_privacy.work);
}
static void sof_ptl_set_mic_privacy(struct snd_sof_dev *sdev,
struct sof_ipc4_intel_mic_privacy_cap *caps)
{
+ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
u32 micpvcp;
if (!caps || !caps->capabilities_length)
@@ -58,6 +80,9 @@ static void sof_ptl_set_mic_privacy(struct snd_sof_dev *sdev,
hdac_bus_eml_set_mic_privacy_mask(sof_to_bus(sdev), true,
AZX_REG_ML_LEPTR_ID_SDW,
PTL_MICPVCP_GET_SDW_MASK(micpvcp));
+
+ INIT_WORK(&hdev->mic_privacy.work, sof_ptl_mic_privacy_work);
+ hdev->mic_privacy.active = true;
}
int sof_ptl_set_ops(struct snd_sof_dev *sdev, struct snd_sof_dsp_ops *dsp_ops)
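Note on the PTL mic-privacy changes above: the interrupt path no longer reads the SoundWire shim or sends the IPC itself; it only schedules a work item, and that work is cancelled in suspend/remove while mic privacy is active. A compact sketch of the pattern, with illustrative names rather than the SOF driver's own:

    #include <linux/workqueue.h>

    struct ex_mic_privacy {
            bool active;
            struct work_struct work;
    };

    static void ex_mic_privacy_fn(struct work_struct *work)
    {
            /* read the privacy state over the shim, then notify the firmware */
    }

    static void ex_mic_privacy_enable(struct ex_mic_privacy *p)
    {
            INIT_WORK(&p->work, ex_mic_privacy_fn);
            p->active = true;
    }

    static void ex_mic_privacy_irq(struct ex_mic_privacy *p)
    {
            if (p->active)
                    schedule_work(&p->work);    /* defer the heavy lifting */
    }

    static void ex_mic_privacy_teardown(struct ex_mic_privacy *p)
    {
            if (p->active)
                    cancel_work_sync(&p->work);
    }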
diff --git a/tools/objtool/Documentation/objtool.txt b/tools/objtool/Documentation/objtool.txt
index 28ac57b9e102..9e97fc25b2d8 100644
--- a/tools/objtool/Documentation/objtool.txt
+++ b/tools/objtool/Documentation/objtool.txt
@@ -34,7 +34,7 @@ Objtool has the following features:
- Return thunk annotation -- annotates all return thunk sites so kernel
can patch them inline, depending on enabled mitigations
-- Return thunk training valiation -- validate that all entry paths
+- Return thunk untraining validation -- validate that all entry paths
untrain a "safe return" before the first return (or call)
- Non-instrumentation validation -- validates non-instrumentable
@@ -281,8 +281,8 @@ the objtool maintainers.
If the error is for an asm file, and func() is indeed a callable
function, add proper frame pointer logic using the FRAME_BEGIN and
FRAME_END macros. Otherwise, if it's not a callable function, remove
- its ELF function annotation by changing ENDPROC to END, and instead
- use the manual unwind hint macros in asm/unwind_hints.h.
+ its ELF function annotation by using SYM_CODE_{START,END} and use the
+ manual unwind hint macros in asm/unwind_hints.h.
If it's a GCC-compiled .c file, the error may be because the function
uses an inline asm() statement which has a "call" instruction. An
@@ -352,7 +352,7 @@ the objtool maintainers.
This is a kernel entry/exit instruction like sysenter or iret. Such
instructions aren't allowed in a callable function, and are most
likely part of the kernel entry code. Such code should probably be
- placed in a SYM_FUNC_CODE block with unwind hints.
+ placed in a SYM_CODE_{START,END} block with unwind hints.
6. file.o: warning: objtool: func()+0x26: sibling call from callable instruction with modified stack frame
@@ -381,7 +381,7 @@ the objtool maintainers.
Another possibility is that the code has some asm or inline asm which
does some unusual things to the stack or the frame pointer. In such
- cases it's probably appropriate to use SYM_FUNC_CODE with unwind
+ cases it's probably appropriate to use SYM_CODE_{START,END} with unwind
hints.
diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c
index 02e490555966..b6fdc68053cc 100644
--- a/tools/objtool/arch/loongarch/decode.c
+++ b/tools/objtool/arch/loongarch/decode.c
@@ -63,7 +63,7 @@ static bool is_loongarch(const struct elf *elf)
if (elf->ehdr.e_machine == EM_LOONGARCH)
return true;
- WARN("unexpected ELF machine type %d", elf->ehdr.e_machine);
+ ERROR("unexpected ELF machine type %d", elf->ehdr.e_machine);
return false;
}
@@ -327,8 +327,10 @@ const char *arch_nop_insn(int len)
{
static u32 nop;
- if (len != LOONGARCH_INSN_SIZE)
- WARN("invalid NOP size: %d\n", len);
+ if (len != LOONGARCH_INSN_SIZE) {
+ ERROR("invalid NOP size: %d\n", len);
+ return NULL;
+ }
nop = LOONGARCH_INSN_NOP;
@@ -339,8 +341,10 @@ const char *arch_ret_insn(int len)
{
static u32 ret;
- if (len != LOONGARCH_INSN_SIZE)
- WARN("invalid RET size: %d\n", len);
+ if (len != LOONGARCH_INSN_SIZE) {
+ ERROR("invalid RET size: %d\n", len);
+ return NULL;
+ }
emit_jirl((union loongarch_instruction *)&ret, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
diff --git a/tools/objtool/arch/loongarch/orc.c b/tools/objtool/arch/loongarch/orc.c
index 873536d009d9..b58c5ff443c9 100644
--- a/tools/objtool/arch/loongarch/orc.c
+++ b/tools/objtool/arch/loongarch/orc.c
@@ -41,7 +41,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->type = ORC_TYPE_REGS_PARTIAL;
break;
default:
- WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
+ ERROR_INSN(insn, "unknown unwind hint type %d", cfi->type);
return -1;
}
@@ -55,7 +55,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->sp_reg = ORC_REG_FP;
break;
default:
- WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
+ ERROR_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
return -1;
}
@@ -72,7 +72,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->fp_reg = ORC_REG_FP;
break;
default:
- WARN_INSN(insn, "unknown FP base reg %d", fp->base);
+ ERROR_INSN(insn, "unknown FP base reg %d", fp->base);
return -1;
}
@@ -89,7 +89,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->ra_reg = ORC_REG_FP;
break;
default:
- WARN_INSN(insn, "unknown RA base reg %d", ra->base);
+ ERROR_INSN(insn, "unknown RA base reg %d", ra->base);
return -1;
}
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 7567c893f45e..33d861c04ebd 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -36,7 +36,7 @@ static int is_x86_64(const struct elf *elf)
case EM_386:
return 0;
default:
- WARN("unexpected ELF machine type %d", elf->ehdr.e_machine);
+ ERROR("unexpected ELF machine type %d", elf->ehdr.e_machine);
return -1;
}
}
@@ -173,7 +173,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
ret = insn_decode(&ins, sec->data->d_buf + offset, maxlen,
x86_64 ? INSN_MODE_64 : INSN_MODE_32);
if (ret < 0) {
- WARN("can't decode instruction at %s:0x%lx", sec->name, offset);
+ ERROR("can't decode instruction at %s:0x%lx", sec->name, offset);
return -1;
}
@@ -321,7 +321,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
break;
default:
- /* WARN ? */
+ /* ERROR ? */
break;
}
@@ -561,8 +561,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
if (ins.prefixes.nbytes == 1 &&
ins.prefixes.bytes[0] == 0xf2) {
/* ENQCMD cannot be used in the kernel. */
- WARN("ENQCMD instruction at %s:%lx", sec->name,
- offset);
+ WARN("ENQCMD instruction at %s:%lx", sec->name, offset);
}
} else if (op2 == 0xa0 || op2 == 0xa8) {
@@ -646,7 +645,7 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
if (disp->sym->type == STT_SECTION)
func = find_symbol_by_offset(disp->sym->sec, reloc_addend(disp));
if (!func) {
- WARN("no func for pv_ops[]");
+ ERROR("no func for pv_ops[]");
return -1;
}
@@ -776,7 +775,7 @@ const char *arch_nop_insn(int len)
};
if (len < 1 || len > 5) {
- WARN("invalid NOP size: %d\n", len);
+ ERROR("invalid NOP size: %d\n", len);
return NULL;
}
@@ -796,7 +795,7 @@ const char *arch_ret_insn(int len)
};
if (len < 1 || len > 5) {
- WARN("invalid RET size: %d\n", len);
+ ERROR("invalid RET size: %d\n", len);
return NULL;
}
diff --git a/tools/objtool/arch/x86/orc.c b/tools/objtool/arch/x86/orc.c
index b6cd943e87f9..7176b9ec5b05 100644
--- a/tools/objtool/arch/x86/orc.c
+++ b/tools/objtool/arch/x86/orc.c
@@ -40,7 +40,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->type = ORC_TYPE_REGS_PARTIAL;
break;
default:
- WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
+ ERROR_INSN(insn, "unknown unwind hint type %d", cfi->type);
return -1;
}
@@ -72,7 +72,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->sp_reg = ORC_REG_DX;
break;
default:
- WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
+ ERROR_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
return -1;
}
@@ -87,7 +87,7 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruct
orc->bp_reg = ORC_REG_BP;
break;
default:
- WARN_INSN(insn, "unknown BP base reg %d", bp->base);
+ ERROR_INSN(insn, "unknown BP base reg %d", bp->base);
return -1;
}
diff --git a/tools/objtool/arch/x86/special.c b/tools/objtool/arch/x86/special.c
index 9c1c9df09aaa..403e587676f1 100644
--- a/tools/objtool/arch/x86/special.c
+++ b/tools/objtool/arch/x86/special.c
@@ -3,11 +3,9 @@
#include <objtool/special.h>
#include <objtool/builtin.h>
+#include <objtool/warn.h>
-#define X86_FEATURE_POPCNT (4 * 32 + 23)
-#define X86_FEATURE_SMAP (9 * 32 + 20)
-
-void arch_handle_alternative(unsigned short feature, struct special_alt *alt)
+void arch_handle_alternative(struct special_alt *alt)
{
static struct special_alt *group, *prev;
@@ -31,34 +29,6 @@ void arch_handle_alternative(unsigned short feature, struct special_alt *alt)
} else group = alt;
prev = alt;
-
- switch (feature) {
- case X86_FEATURE_SMAP:
- /*
- * If UACCESS validation is enabled; force that alternative;
- * otherwise force it the other way.
- *
- * What we want to avoid is having both the original and the
- * alternative code flow at the same time, in that case we can
- * find paths that see the STAC but take the NOP instead of
- * CLAC and the other way around.
- */
- if (opts.uaccess)
- alt->skip_orig = true;
- else
- alt->skip_alt = true;
- break;
- case X86_FEATURE_POPCNT:
- /*
- * It has been requested that we don't validate the !POPCNT
- * feature path which is a "very very small percentage of
- * machines".
- */
- alt->skip_orig = true;
- break;
- default:
- break;
- }
}
bool arch_support_alt_relocation(struct special_alt *special_alt,
@@ -156,8 +126,10 @@ struct reloc *arch_find_switch_table(struct objtool_file *file,
* indicates a rare GCC quirk/bug which can leave dead
* code behind.
*/
- if (reloc_type(text_reloc) == R_X86_64_PC32)
+ if (reloc_type(text_reloc) == R_X86_64_PC32) {
+ WARN_INSN(insn, "ignoring unreachables due to jump table quirk");
file->ignore_unreachables = true;
+ }
*table_size = 0;
return rodata_reloc;
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 5f761f420b8c..80239843e9f0 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -8,18 +8,18 @@
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
+#include <errno.h>
#include <sys/stat.h>
#include <sys/sendfile.h>
#include <objtool/builtin.h>
#include <objtool/objtool.h>
+#include <objtool/warn.h>
-#define ERROR(format, ...) \
- fprintf(stderr, \
- "error: objtool: " format "\n", \
- ##__VA_ARGS__)
+#define ORIG_SUFFIX ".orig"
+int orig_argc;
+static char **orig_argv;
const char *objname;
-
struct opts opts;
static const char * const check_usage[] = {
@@ -194,30 +194,30 @@ static int copy_file(const char *src, const char *dst)
src_fd = open(src, O_RDONLY);
if (src_fd == -1) {
- ERROR("can't open '%s' for reading", src);
+ ERROR("can't open %s for reading: %s", src, strerror(errno));
return 1;
}
dst_fd = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0400);
if (dst_fd == -1) {
- ERROR("can't open '%s' for writing", dst);
+ ERROR("can't open %s for writing: %s", dst, strerror(errno));
return 1;
}
if (fstat(src_fd, &stat) == -1) {
- perror("fstat");
+ ERROR_GLIBC("fstat");
return 1;
}
if (fchmod(dst_fd, stat.st_mode) == -1) {
- perror("fchmod");
+ ERROR_GLIBC("fchmod");
return 1;
}
for (to_copy = stat.st_size; to_copy > 0; to_copy -= copied) {
copied = sendfile(dst_fd, src_fd, &offset, to_copy);
if (copied == -1) {
- perror("sendfile");
+ ERROR_GLIBC("sendfile");
return 1;
}
}
@@ -227,39 +227,73 @@ static int copy_file(const char *src, const char *dst)
return 0;
}
-static char **save_argv(int argc, const char **argv)
+static void save_argv(int argc, const char **argv)
{
- char **orig_argv;
-
orig_argv = calloc(argc, sizeof(char *));
if (!orig_argv) {
- perror("calloc");
- return NULL;
+ ERROR_GLIBC("calloc");
+ exit(1);
}
for (int i = 0; i < argc; i++) {
orig_argv[i] = strdup(argv[i]);
if (!orig_argv[i]) {
- perror("strdup");
- return NULL;
+ ERROR_GLIBC("strdup(%s)", argv[i]);
+ exit(1);
}
};
-
- return orig_argv;
}
-#define ORIG_SUFFIX ".orig"
+void print_args(void)
+{
+ char *backup = NULL;
+
+ if (opts.output || opts.dryrun)
+ goto print;
+
+ /*
+ * Make a backup before kbuild deletes the file so the error
+ * can be recreated without recompiling or relinking.
+ */
+ backup = malloc(strlen(objname) + strlen(ORIG_SUFFIX) + 1);
+ if (!backup) {
+ ERROR_GLIBC("malloc");
+ goto print;
+ }
+
+ strcpy(backup, objname);
+ strcat(backup, ORIG_SUFFIX);
+ if (copy_file(objname, backup)) {
+ backup = NULL;
+ goto print;
+ }
+
+print:
+ /*
+ * Print the cmdline args to make it easier to recreate. If '--output'
+ * wasn't used, add it to the printed args with the backup as input.
+ */
+ fprintf(stderr, "%s", orig_argv[0]);
+
+ for (int i = 1; i < orig_argc; i++) {
+ char *arg = orig_argv[i];
+
+ if (backup && !strcmp(arg, objname))
+ fprintf(stderr, " %s -o %s", backup, objname);
+ else
+ fprintf(stderr, " %s", arg);
+ }
+
+ fprintf(stderr, "\n");
+}
int objtool_run(int argc, const char **argv)
{
struct objtool_file *file;
- char *backup = NULL;
- char **orig_argv;
int ret = 0;
- orig_argv = save_argv(argc, argv);
- if (!orig_argv)
- return 1;
+ orig_argc = argc;
+ save_argv(argc, argv);
cmd_parse_options(argc, argv, check_usage);
@@ -282,59 +316,19 @@ int objtool_run(int argc, const char **argv)
file = objtool_open_read(objname);
if (!file)
- goto err;
+ return 1;
if (!opts.link && has_multiple_files(file->elf)) {
ERROR("Linked object requires --link");
- goto err;
+ return 1;
}
ret = check(file);
if (ret)
- goto err;
+ return ret;
if (!opts.dryrun && file->elf->changed && elf_write(file->elf))
- goto err;
-
- return 0;
-
-err:
- if (opts.dryrun)
- goto err_msg;
-
- if (opts.output) {
- unlink(opts.output);
- goto err_msg;
- }
-
- /*
- * Make a backup before kbuild deletes the file so the error
- * can be recreated without recompiling or relinking.
- */
- backup = malloc(strlen(objname) + strlen(ORIG_SUFFIX) + 1);
- if (!backup) {
- perror("malloc");
- return 1;
- }
-
- strcpy(backup, objname);
- strcat(backup, ORIG_SUFFIX);
- if (copy_file(objname, backup))
return 1;
-err_msg:
- fprintf(stderr, "%s", orig_argv[0]);
-
- for (int i = 1; i < argc; i++) {
- char *arg = orig_argv[i];
-
- if (backup && !strcmp(arg, objname))
- fprintf(stderr, " %s -o %s", backup, objname);
- else
- fprintf(stderr, " %s", arg);
- }
-
- fprintf(stderr, "\n");
-
- return 1;
+ return 0;
}
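Note on the builtin-check.c restructuring above: the error paths no longer duplicate the backup-and-print logic at the end of objtool_run(); the original argv is saved up front, and a separate print_args() makes an objname.orig backup and prints a command line that reproduces the failure against it. A rough userspace model of that reporting step (illustrative, not objtool's exact code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void ex_print_rerun(int argc, char **argv, const char *objname)
    {
            size_t len = strlen(objname) + strlen(".orig") + 1;
            char *backup = malloc(len);

            if (backup)
                    snprintf(backup, len, "%s.orig", objname);

            /* Reprint the command, swapping in the backup as input. */
            fprintf(stderr, "%s", argv[0]);
            for (int i = 1; i < argc; i++) {
                    if (backup && !strcmp(argv[i], objname))
                            fprintf(stderr, " %s -o %s", backup, objname);
                    else
                            fprintf(stderr, " %s", argv[i]);
            }
            fprintf(stderr, "\n");
            free(backup);
    }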
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index ca3435acc326..4a1f6c3169b3 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -25,7 +25,6 @@
struct alternative {
struct alternative *next;
struct instruction *insn;
- bool skip_orig;
};
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
@@ -341,12 +340,7 @@ static void init_insn_state(struct objtool_file *file, struct insn_state *state,
memset(state, 0, sizeof(*state));
init_cfi_state(&state->cfi);
- /*
- * We need the full vmlinux for noinstr validation, otherwise we can
- * not correctly determine insn_call_dest(insn)->sec (external symbols
- * do not have a section).
- */
- if (opts.link && opts.noinstr && sec)
+ if (opts.noinstr && sec)
state->noinstr = sec->noinstr;
}
@@ -354,7 +348,7 @@ static struct cfi_state *cfi_alloc(void)
{
struct cfi_state *cfi = calloc(1, sizeof(struct cfi_state));
if (!cfi) {
- WARN("calloc failed");
+ ERROR_GLIBC("calloc");
exit(1);
}
nr_cfi++;
@@ -410,7 +404,7 @@ static void *cfi_hash_alloc(unsigned long size)
PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANON, -1, 0);
if (cfi_hash == (void *)-1L) {
- WARN("mmap fail cfi_hash");
+ ERROR_GLIBC("mmap fail cfi_hash");
cfi_hash = NULL;
} else if (opts.stats) {
printf("cfi_bits: %d\n", cfi_bits);
@@ -466,7 +460,7 @@ static int decode_instructions(struct objtool_file *file)
if (!insns || idx == INSN_CHUNK_MAX) {
insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
if (!insns) {
- WARN("malloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
idx = 0;
@@ -501,8 +495,6 @@ static int decode_instructions(struct objtool_file *file)
nr_insns++;
}
-// printf("%s: last chunk used: %d\n", sec->name, (int)idx);
-
sec_for_each_sym(sec, func) {
if (func->type != STT_NOTYPE && func->type != STT_FUNC)
continue;
@@ -511,8 +503,7 @@ static int decode_instructions(struct objtool_file *file)
/* Heuristic: likely an "end" symbol */
if (func->type == STT_NOTYPE)
continue;
- WARN("%s(): STT_FUNC at end of section",
- func->name);
+ ERROR("%s(): STT_FUNC at end of section", func->name);
return -1;
}
@@ -520,8 +511,7 @@ static int decode_instructions(struct objtool_file *file)
continue;
if (!find_insn(file, sec, func->offset)) {
- WARN("%s(): can't find starting instruction",
- func->name);
+ ERROR("%s(): can't find starting instruction", func->name);
return -1;
}
@@ -568,14 +558,20 @@ static int add_pv_ops(struct objtool_file *file, const char *symname)
if (!reloc)
break;
+ idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);
+
func = reloc->sym;
if (func->type == STT_SECTION)
func = find_symbol_by_offset(reloc->sym->sec,
reloc_addend(reloc));
+ if (!func) {
+ ERROR_FUNC(reloc->sym->sec, reloc_addend(reloc),
+ "can't find func at %s[%d]", symname, idx);
+ return -1;
+ }
- idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);
-
- objtool_pv_add(file, idx, func);
+ if (objtool_pv_add(file, idx, func))
+ return -1;
off = reloc_offset(reloc) + 1;
if (off > end)
@@ -599,7 +595,7 @@ static int init_pv_ops(struct objtool_file *file)
};
const char *pv_ops;
struct symbol *sym;
- int idx, nr;
+ int idx, nr, ret;
if (!opts.noinstr)
return 0;
@@ -612,14 +608,19 @@ static int init_pv_ops(struct objtool_file *file)
nr = sym->len / sizeof(unsigned long);
file->pv_ops = calloc(sizeof(struct pv_state), nr);
- if (!file->pv_ops)
+ if (!file->pv_ops) {
+ ERROR_GLIBC("calloc");
return -1;
+ }
for (idx = 0; idx < nr; idx++)
INIT_LIST_HEAD(&file->pv_ops[idx].targets);
- for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
- add_pv_ops(file, pv_ops);
+ for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) {
+ ret = add_pv_ops(file, pv_ops);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -667,13 +668,12 @@ static int create_static_call_sections(struct objtool_file *file)
/* find key symbol */
key_name = strdup(insn_call_dest(insn)->name);
if (!key_name) {
- perror("strdup");
+ ERROR_GLIBC("strdup");
return -1;
}
if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
STATIC_CALL_TRAMP_PREFIX_LEN)) {
- WARN("static_call: trampoline name malformed: %s", key_name);
- free(key_name);
+ ERROR("static_call: trampoline name malformed: %s", key_name);
return -1;
}
tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
@@ -682,8 +682,7 @@ static int create_static_call_sections(struct objtool_file *file)
key_sym = find_symbol_by_name(file->elf, tmp);
if (!key_sym) {
if (!opts.module) {
- WARN("static_call: can't find static_call_key symbol: %s", tmp);
- free(key_name);
+ ERROR("static_call: can't find static_call_key symbol: %s", tmp);
return -1;
}
@@ -698,7 +697,6 @@ static int create_static_call_sections(struct objtool_file *file)
*/
key_sym = insn_call_dest(insn);
}
- free(key_name);
/* populate reloc for 'key' */
if (!elf_init_reloc_data_sym(file->elf, sec,
@@ -829,8 +827,11 @@ static int create_ibt_endbr_seal_sections(struct objtool_file *file)
if (opts.module && sym && sym->type == STT_FUNC &&
insn->offset == sym->offset &&
(!strcmp(sym->name, "init_module") ||
- !strcmp(sym->name, "cleanup_module")))
- WARN("%s(): not an indirect call target", sym->name);
+ !strcmp(sym->name, "cleanup_module"))) {
+ ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
+ sym->name);
+ return -1;
+ }
if (!elf_init_reloc_text_sym(file->elf, sec,
idx * sizeof(int), idx,
@@ -979,16 +980,15 @@ static int create_direct_call_sections(struct objtool_file *file)
/*
* Warnings shouldn't be reported for ignored functions.
*/
-static void add_ignores(struct objtool_file *file)
+static int add_ignores(struct objtool_file *file)
{
- struct instruction *insn;
struct section *rsec;
struct symbol *func;
struct reloc *reloc;
rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
if (!rsec)
- return;
+ return 0;
for_each_reloc(rsec, reloc) {
switch (reloc->sym->type) {
@@ -1003,14 +1003,17 @@ static void add_ignores(struct objtool_file *file)
break;
default:
- WARN("unexpected relocation symbol type in %s: %d",
- rsec->name, reloc->sym->type);
- continue;
+ ERROR("unexpected relocation symbol type in %s: %d",
+ rsec->name, reloc->sym->type);
+ return -1;
}
- func_for_each_insn(file, func, insn)
- insn->ignore = true;
+ func->ignore = true;
+ if (func->cfunc)
+ func->cfunc->ignore = true;
}
+
+ return 0;
}
/*
@@ -1188,12 +1191,15 @@ static const char *uaccess_safe_builtin[] = {
"__ubsan_handle_load_invalid_value",
/* STACKLEAK */
"stackleak_track_stack",
+ /* TRACE_BRANCH_PROFILING */
+ "ftrace_likely_update",
+ /* STACKPROTECTOR */
+ "__stack_chk_fail",
/* misc */
"csum_partial_copy_generic",
"copy_mc_fragile",
"copy_mc_fragile_handle_tail",
"copy_mc_enhanced_fast_string",
- "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
"rep_stos_alternative",
"rep_movs_alternative",
"__copy_user_nocache",
@@ -1275,7 +1281,7 @@ static void remove_insn_ops(struct instruction *insn)
insn->stack_ops = NULL;
}
-static void annotate_call_site(struct objtool_file *file,
+static int annotate_call_site(struct objtool_file *file,
struct instruction *insn, bool sibling)
{
struct reloc *reloc = insn_reloc(file, insn);
@@ -1286,12 +1292,12 @@ static void annotate_call_site(struct objtool_file *file,
if (sym->static_call_tramp) {
list_add_tail(&insn->call_node, &file->static_call_list);
- return;
+ return 0;
}
if (sym->retpoline_thunk) {
list_add_tail(&insn->call_node, &file->retpoline_call_list);
- return;
+ return 0;
}
/*
@@ -1303,10 +1309,12 @@ static void annotate_call_site(struct objtool_file *file,
if (reloc)
set_reloc_type(file->elf, reloc, R_NONE);
- elf_write_insn(file->elf, insn->sec,
- insn->offset, insn->len,
- sibling ? arch_ret_insn(insn->len)
- : arch_nop_insn(insn->len));
+ if (elf_write_insn(file->elf, insn->sec,
+ insn->offset, insn->len,
+ sibling ? arch_ret_insn(insn->len)
+ : arch_nop_insn(insn->len))) {
+ return -1;
+ }
insn->type = sibling ? INSN_RETURN : INSN_NOP;
@@ -1320,7 +1328,7 @@ static void annotate_call_site(struct objtool_file *file,
insn->retpoline_safe = true;
}
- return;
+ return 0;
}
if (opts.mcount && sym->fentry) {
@@ -1330,15 +1338,17 @@ static void annotate_call_site(struct objtool_file *file,
if (reloc)
set_reloc_type(file->elf, reloc, R_NONE);
- elf_write_insn(file->elf, insn->sec,
- insn->offset, insn->len,
- arch_nop_insn(insn->len));
+ if (elf_write_insn(file->elf, insn->sec,
+ insn->offset, insn->len,
+ arch_nop_insn(insn->len))) {
+ return -1;
+ }
insn->type = INSN_NOP;
}
list_add_tail(&insn->call_node, &file->mcount_loc_list);
- return;
+ return 0;
}
if (insn->type == INSN_CALL && !insn->sec->init &&
@@ -1347,14 +1357,16 @@ static void annotate_call_site(struct objtool_file *file,
if (!sibling && dead_end_function(file, sym))
insn->dead_end = true;
+
+ return 0;
}
-static void add_call_dest(struct objtool_file *file, struct instruction *insn,
+static int add_call_dest(struct objtool_file *file, struct instruction *insn,
struct symbol *dest, bool sibling)
{
insn->_call_dest = dest;
if (!dest)
- return;
+ return 0;
/*
* Whatever stack impact regular CALLs have, should be undone
@@ -1365,10 +1377,10 @@ static void add_call_dest(struct objtool_file *file, struct instruction *insn,
*/
remove_insn_ops(insn);
- annotate_call_site(file, insn, sibling);
+ return annotate_call_site(file, insn, sibling);
}
-static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
+static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
/*
* Retpoline calls/jumps are really dynamic calls/jumps in disguise,
@@ -1385,7 +1397,7 @@ static void add_retpoline_call(struct objtool_file *file, struct instruction *in
insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
break;
default:
- return;
+ return 0;
}
insn->retpoline_safe = true;
@@ -1399,7 +1411,7 @@ static void add_retpoline_call(struct objtool_file *file, struct instruction *in
*/
remove_insn_ops(insn);
- annotate_call_site(file, insn, false);
+ return annotate_call_site(file, insn, false);
}
static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
@@ -1468,8 +1480,11 @@ static int add_jump_destinations(struct objtool_file *file)
struct reloc *reloc;
struct section *dest_sec;
unsigned long dest_off;
+ int ret;
for_each_insn(file, insn) {
+ struct symbol *func = insn_func(insn);
+
if (insn->jump_dest) {
/*
* handle_group_alt() may have previously set
@@ -1488,17 +1503,21 @@ static int add_jump_destinations(struct objtool_file *file)
dest_sec = reloc->sym->sec;
dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
} else if (reloc->sym->retpoline_thunk) {
- add_retpoline_call(file, insn);
+ ret = add_retpoline_call(file, insn);
+ if (ret)
+ return ret;
continue;
} else if (reloc->sym->return_thunk) {
add_return_call(file, insn, true);
continue;
- } else if (insn_func(insn)) {
+ } else if (func) {
/*
* External sibling call or internal sibling call with
* STT_FUNC reloc.
*/
- add_call_dest(file, insn, reloc->sym, true);
+ ret = add_call_dest(file, insn, reloc->sym, true);
+ if (ret)
+ return ret;
continue;
} else if (reloc->sym->sec->idx) {
dest_sec = reloc->sym->sec;
@@ -1526,8 +1545,17 @@ static int add_jump_destinations(struct objtool_file *file)
continue;
}
- WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
- dest_sec->name, dest_off);
+ /*
+ * GCOV/KCOV dead code can jump to the end of the
+ * function/section.
+ */
+ if (file->ignore_unreachables && func &&
+ dest_sec == insn->sec &&
+ dest_off == func->offset + func->len)
+ continue;
+
+ ERROR_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
+ dest_sec->name, dest_off);
return -1;
}
@@ -1538,7 +1566,9 @@ static int add_jump_destinations(struct objtool_file *file)
*/
if (jump_dest->sym && jump_dest->offset == jump_dest->sym->offset) {
if (jump_dest->sym->retpoline_thunk) {
- add_retpoline_call(file, insn);
+ ret = add_retpoline_call(file, insn);
+ if (ret)
+ return ret;
continue;
}
if (jump_dest->sym->return_thunk) {
@@ -1550,8 +1580,7 @@ static int add_jump_destinations(struct objtool_file *file)
/*
* Cross-function jump.
*/
- if (insn_func(insn) && insn_func(jump_dest) &&
- insn_func(insn) != insn_func(jump_dest)) {
+ if (func && insn_func(jump_dest) && func != insn_func(jump_dest)) {
/*
* For GCC 8+, create parent/child links for any cold
@@ -1568,10 +1597,10 @@ static int add_jump_destinations(struct objtool_file *file)
* case where the parent function's only reference to a
* subfunction is through a jump table.
*/
- if (!strstr(insn_func(insn)->name, ".cold") &&
+ if (!strstr(func->name, ".cold") &&
strstr(insn_func(jump_dest)->name, ".cold")) {
- insn_func(insn)->cfunc = insn_func(jump_dest);
- insn_func(jump_dest)->pfunc = insn_func(insn);
+ func->cfunc = insn_func(jump_dest);
+ insn_func(jump_dest)->pfunc = func;
}
}
@@ -1580,7 +1609,9 @@ static int add_jump_destinations(struct objtool_file *file)
* Internal sibling call without reloc or with
* STT_SECTION reloc.
*/
- add_call_dest(file, insn, insn_func(jump_dest), true);
+ ret = add_call_dest(file, insn, insn_func(jump_dest), true);
+ if (ret)
+ return ret;
continue;
}
@@ -1610,8 +1641,10 @@ static int add_call_destinations(struct objtool_file *file)
unsigned long dest_off;
struct symbol *dest;
struct reloc *reloc;
+ int ret;
for_each_insn(file, insn) {
+ struct symbol *func = insn_func(insn);
if (insn->type != INSN_CALL)
continue;
@@ -1620,18 +1653,20 @@ static int add_call_destinations(struct objtool_file *file)
dest_off = arch_jump_destination(insn);
dest = find_call_destination(insn->sec, dest_off);
- add_call_dest(file, insn, dest, false);
+ ret = add_call_dest(file, insn, dest, false);
+ if (ret)
+ return ret;
- if (insn->ignore)
+ if (func && func->ignore)
continue;
if (!insn_call_dest(insn)) {
- WARN_INSN(insn, "unannotated intra-function call");
+ ERROR_INSN(insn, "unannotated intra-function call");
return -1;
}
- if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) {
- WARN_INSN(insn, "unsupported call to non-function");
+ if (func && insn_call_dest(insn)->type != STT_FUNC) {
+ ERROR_INSN(insn, "unsupported call to non-function");
return -1;
}
@@ -1639,18 +1674,25 @@ static int add_call_destinations(struct objtool_file *file)
dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
dest = find_call_destination(reloc->sym->sec, dest_off);
if (!dest) {
- WARN_INSN(insn, "can't find call dest symbol at %s+0x%lx",
- reloc->sym->sec->name, dest_off);
+ ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
+ reloc->sym->sec->name, dest_off);
return -1;
}
- add_call_dest(file, insn, dest, false);
+ ret = add_call_dest(file, insn, dest, false);
+ if (ret)
+ return ret;
} else if (reloc->sym->retpoline_thunk) {
- add_retpoline_call(file, insn);
+ ret = add_retpoline_call(file, insn);
+ if (ret)
+ return ret;
- } else
- add_call_dest(file, insn, reloc->sym, false);
+ } else {
+ ret = add_call_dest(file, insn, reloc->sym, false);
+ if (ret)
+ return ret;
+ }
}
return 0;
@@ -1673,15 +1715,15 @@ static int handle_group_alt(struct objtool_file *file,
if (!orig_alt_group) {
struct instruction *last_orig_insn = NULL;
- orig_alt_group = malloc(sizeof(*orig_alt_group));
+ orig_alt_group = calloc(1, sizeof(*orig_alt_group));
if (!orig_alt_group) {
- WARN("malloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
orig_alt_group->cfi = calloc(special_alt->orig_len,
sizeof(struct cfi_state *));
if (!orig_alt_group->cfi) {
- WARN("calloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
@@ -1697,21 +1739,22 @@ static int handle_group_alt(struct objtool_file *file,
orig_alt_group->first_insn = orig_insn;
orig_alt_group->last_insn = last_orig_insn;
orig_alt_group->nop = NULL;
+ orig_alt_group->ignore = orig_insn->ignore_alts;
} else {
if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
orig_alt_group->first_insn->offset != special_alt->orig_len) {
- WARN_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
- orig_alt_group->last_insn->offset +
- orig_alt_group->last_insn->len -
- orig_alt_group->first_insn->offset,
- special_alt->orig_len);
+ ERROR_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
+ orig_alt_group->last_insn->offset +
+ orig_alt_group->last_insn->len -
+ orig_alt_group->first_insn->offset,
+ special_alt->orig_len);
return -1;
}
}
- new_alt_group = malloc(sizeof(*new_alt_group));
+ new_alt_group = calloc(1, sizeof(*new_alt_group));
if (!new_alt_group) {
- WARN("malloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
@@ -1723,9 +1766,9 @@ static int handle_group_alt(struct objtool_file *file,
* instruction affects the stack, the instruction after it (the
* nop) will propagate the new state to the shared CFI array.
*/
- nop = malloc(sizeof(*nop));
+ nop = calloc(1, sizeof(*nop));
if (!nop) {
- WARN("malloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
memset(nop, 0, sizeof(*nop));
@@ -1736,7 +1779,6 @@ static int handle_group_alt(struct objtool_file *file,
nop->type = INSN_NOP;
nop->sym = orig_insn->sym;
nop->alt_group = new_alt_group;
- nop->ignore = orig_insn->ignore_alts;
}
if (!special_alt->new_len) {
@@ -1753,7 +1795,6 @@ static int handle_group_alt(struct objtool_file *file,
last_new_insn = insn;
- insn->ignore = orig_insn->ignore_alts;
insn->sym = orig_insn->sym;
insn->alt_group = new_alt_group;
@@ -1769,7 +1810,7 @@ static int handle_group_alt(struct objtool_file *file,
if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
!arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
- WARN_INSN(insn, "unsupported relocation in alternatives section");
+ ERROR_INSN(insn, "unsupported relocation in alternatives section");
return -1;
}
@@ -1783,15 +1824,15 @@ static int handle_group_alt(struct objtool_file *file,
if (dest_off == special_alt->new_off + special_alt->new_len) {
insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
if (!insn->jump_dest) {
- WARN_INSN(insn, "can't find alternative jump destination");
+ ERROR_INSN(insn, "can't find alternative jump destination");
return -1;
}
}
}
if (!last_new_insn) {
- WARN_FUNC("can't find last new alternative instruction",
- special_alt->new_sec, special_alt->new_off);
+ ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
+ "can't find last new alternative instruction");
return -1;
}
@@ -1800,6 +1841,7 @@ end:
new_alt_group->first_insn = *new_insn;
new_alt_group->last_insn = last_new_insn;
new_alt_group->nop = nop;
+ new_alt_group->ignore = (*new_insn)->ignore_alts;
new_alt_group->cfi = orig_alt_group->cfi;
return 0;
}
@@ -1817,7 +1859,7 @@ static int handle_jump_alt(struct objtool_file *file,
if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
orig_insn->type != INSN_NOP) {
- WARN_INSN(orig_insn, "unsupported instruction at jump label");
+ ERROR_INSN(orig_insn, "unsupported instruction at jump label");
return -1;
}
@@ -1826,9 +1868,13 @@ static int handle_jump_alt(struct objtool_file *file,
if (reloc)
set_reloc_type(file->elf, reloc, R_NONE);
- elf_write_insn(file->elf, orig_insn->sec,
- orig_insn->offset, orig_insn->len,
- arch_nop_insn(orig_insn->len));
+
+ if (elf_write_insn(file->elf, orig_insn->sec,
+ orig_insn->offset, orig_insn->len,
+ arch_nop_insn(orig_insn->len))) {
+ return -1;
+ }
+
orig_insn->type = INSN_NOP;
}
@@ -1864,19 +1910,17 @@ static int add_special_section_alts(struct objtool_file *file)
struct alternative *alt;
int ret;
- ret = special_get_alts(file->elf, &special_alts);
- if (ret)
- return ret;
+ if (special_get_alts(file->elf, &special_alts))
+ return -1;
list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
orig_insn = find_insn(file, special_alt->orig_sec,
special_alt->orig_off);
if (!orig_insn) {
- WARN_FUNC("special: can't find orig instruction",
- special_alt->orig_sec, special_alt->orig_off);
- ret = -1;
- goto out;
+ ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
+ "special: can't find orig instruction");
+ return -1;
}
new_insn = NULL;
@@ -1884,41 +1928,37 @@ static int add_special_section_alts(struct objtool_file *file)
new_insn = find_insn(file, special_alt->new_sec,
special_alt->new_off);
if (!new_insn) {
- WARN_FUNC("special: can't find new instruction",
- special_alt->new_sec,
- special_alt->new_off);
- ret = -1;
- goto out;
+ ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
+ "special: can't find new instruction");
+ return -1;
}
}
if (special_alt->group) {
if (!special_alt->orig_len) {
- WARN_INSN(orig_insn, "empty alternative entry");
+ ERROR_INSN(orig_insn, "empty alternative entry");
continue;
}
ret = handle_group_alt(file, special_alt, orig_insn,
&new_insn);
if (ret)
- goto out;
+ return ret;
+
} else if (special_alt->jump_or_nop) {
ret = handle_jump_alt(file, special_alt, orig_insn,
&new_insn);
if (ret)
- goto out;
+ return ret;
}
- alt = malloc(sizeof(*alt));
+ alt = calloc(1, sizeof(*alt));
if (!alt) {
- WARN("malloc failed");
- ret = -1;
- goto out;
+ ERROR_GLIBC("calloc");
+ return -1;
}
alt->insn = new_insn;
- alt->skip_orig = special_alt->skip_orig;
- orig_insn->ignore_alts |= special_alt->skip_alt;
alt->next = orig_insn->alts;
orig_insn->alts = alt;
@@ -1932,8 +1972,7 @@ static int add_special_section_alts(struct objtool_file *file)
printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
}
-out:
- return ret;
+ return 0;
}
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
@@ -1941,8 +1980,7 @@ __weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct relo
return reloc->sym->offset + reloc_addend(reloc);
}
-static int add_jump_table(struct objtool_file *file, struct instruction *insn,
- struct reloc *next_table)
+static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
unsigned long table_size = insn_jump_table_size(insn);
struct symbol *pfunc = insn_func(insn)->pfunc;
@@ -1962,7 +2000,7 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
/* Check for the end of the table: */
if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
break;
- if (reloc != table && reloc == next_table)
+ if (reloc != table && is_jump_table(reloc))
break;
/* Make sure the table entries are consecutive: */
@@ -1991,9 +2029,9 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
break;
- alt = malloc(sizeof(*alt));
+ alt = calloc(1, sizeof(*alt));
if (!alt) {
- WARN("malloc failed");
+ ERROR_GLIBC("calloc");
return -1;
}
@@ -2005,7 +2043,7 @@ next:
}
if (!prev_offset) {
- WARN_INSN(insn, "can't find switch jump table");
+ ERROR_INSN(insn, "can't find switch jump table");
return -1;
}
@@ -2041,7 +2079,7 @@ static void find_jump_table(struct objtool_file *file, struct symbol *func,
insn->jump_dest &&
(insn->jump_dest->offset <= insn->offset ||
insn->jump_dest->offset > orig_insn->offset))
- break;
+ break;
table_reloc = arch_find_switch_table(file, insn, &table_size);
if (!table_reloc)
@@ -2053,8 +2091,10 @@ static void find_jump_table(struct objtool_file *file, struct symbol *func,
if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
continue;
+ set_jump_table(table_reloc);
orig_insn->_jump_table = table_reloc;
orig_insn->_jump_table_size = table_size;
+
break;
}
}
@@ -2096,31 +2136,19 @@ static void mark_func_jump_tables(struct objtool_file *file,
static int add_func_jump_tables(struct objtool_file *file,
struct symbol *func)
{
- struct instruction *insn, *insn_t1 = NULL, *insn_t2;
- int ret = 0;
+ struct instruction *insn;
+ int ret;
func_for_each_insn(file, func, insn) {
if (!insn_jump_table(insn))
continue;
- if (!insn_t1) {
- insn_t1 = insn;
- continue;
- }
-
- insn_t2 = insn;
-
- ret = add_jump_table(file, insn_t1, insn_jump_table(insn_t2));
+ ret = add_jump_table(file, insn);
if (ret)
return ret;
-
- insn_t1 = insn_t2;
}
- if (insn_t1)
- ret = add_jump_table(file, insn_t1, NULL);
-
- return ret;
+ return 0;
}
/*
@@ -2173,12 +2201,12 @@ static int read_unwind_hints(struct objtool_file *file)
return 0;
if (!sec->rsec) {
- WARN("missing .rela.discard.unwind_hints section");
+ ERROR("missing .rela.discard.unwind_hints section");
return -1;
}
if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
- WARN("struct unwind_hint size mismatch");
+ ERROR("struct unwind_hint size mismatch");
return -1;
}
@@ -2189,7 +2217,7 @@ static int read_unwind_hints(struct objtool_file *file)
reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
if (!reloc) {
- WARN("can't find reloc for unwind_hints[%d]", i);
+ ERROR("can't find reloc for unwind_hints[%d]", i);
return -1;
}
@@ -2198,13 +2226,13 @@ static int read_unwind_hints(struct objtool_file *file)
} else if (reloc->sym->local_label) {
offset = reloc->sym->offset;
} else {
- WARN("unexpected relocation symbol type in %s", sec->rsec->name);
+ ERROR("unexpected relocation symbol type in %s", sec->rsec->name);
return -1;
}
insn = find_insn(file, reloc->sym->sec, offset);
if (!insn) {
- WARN("can't find insn for unwind_hints[%d]", i);
+ ERROR("can't find insn for unwind_hints[%d]", i);
return -1;
}
@@ -2231,7 +2259,8 @@ static int read_unwind_hints(struct objtool_file *file)
if (sym && sym->bind == STB_GLOBAL) {
if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
- WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
+ ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
+ return -1;
}
}
}
@@ -2245,7 +2274,7 @@ static int read_unwind_hints(struct objtool_file *file)
cfi = *(insn->cfi);
if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
- WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
+ ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
return -1;
}
@@ -2291,7 +2320,7 @@ static int read_annotate(struct objtool_file *file,
insn = find_insn(file, reloc->sym->sec, offset);
if (!insn) {
- WARN("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
+ ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
return -1;
}
@@ -2306,6 +2335,8 @@ static int read_annotate(struct objtool_file *file,
static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
{
switch (type) {
+
+ /* Must be before add_special_section_alts() */
case ANNOTYPE_IGNORE_ALTS:
insn->ignore_alts = true;
break;
@@ -2332,7 +2363,7 @@ static int __annotate_ifc(struct objtool_file *file, int type, struct instructio
return 0;
if (insn->type != INSN_CALL) {
- WARN_INSN(insn, "intra_function_call not a direct call");
+ ERROR_INSN(insn, "intra_function_call not a direct call");
return -1;
}
@@ -2346,8 +2377,8 @@ static int __annotate_ifc(struct objtool_file *file, int type, struct instructio
dest_off = arch_jump_destination(insn);
insn->jump_dest = find_insn(file, insn->sec, dest_off);
if (!insn->jump_dest) {
- WARN_INSN(insn, "can't find call dest at %s+0x%lx",
- insn->sec->name, dest_off);
+ ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
+ insn->sec->name, dest_off);
return -1;
}
@@ -2366,7 +2397,7 @@ static int __annotate_late(struct objtool_file *file, int type, struct instructi
insn->type != INSN_CALL_DYNAMIC &&
insn->type != INSN_RETURN &&
insn->type != INSN_NOP) {
- WARN_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
+ ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
return -1;
}
@@ -2398,8 +2429,8 @@ static int __annotate_late(struct objtool_file *file, int type, struct instructi
break;
default:
- WARN_INSN(insn, "Unknown annotation type: %d", type);
- break;
+ ERROR_INSN(insn, "Unknown annotation type: %d", type);
+ return -1;
}
return 0;
@@ -2512,7 +2543,10 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
- add_ignores(file);
+ ret = add_ignores(file);
+ if (ret)
+ return ret;
+
add_uaccess_safe(file);
ret = read_annotate(file, __annotate_early);
@@ -2732,7 +2766,7 @@ static int update_cfi_state(struct instruction *insn,
if (cfa->base == CFI_UNDEFINED) {
if (insn_func(insn)) {
WARN_INSN(insn, "undefined stack state");
- return -1;
+ return 1;
}
return 0;
}
@@ -3175,9 +3209,8 @@ static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn
if (cficmp(alt_cfi[group_off], insn->cfi)) {
struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
struct instruction *orig = orig_group->first_insn;
- char *where = offstr(insn->sec, insn->offset);
- WARN_INSN(orig, "stack layout conflict in alternatives: %s", where);
- free(where);
+ WARN_INSN(orig, "stack layout conflict in alternatives: %s",
+ offstr(insn->sec, insn->offset));
return -1;
}
}
@@ -3190,13 +3223,15 @@ static int handle_insn_ops(struct instruction *insn,
struct insn_state *state)
{
struct stack_op *op;
+ int ret;
for (op = insn->stack_ops; op; op = op->next) {
- if (update_cfi_state(insn, next_insn, &state->cfi, op))
- return 1;
+ ret = update_cfi_state(insn, next_insn, &state->cfi, op);
+ if (ret)
+ return ret;
- if (!insn->alt_group)
+ if (!opts.uaccess || !insn->alt_group)
continue;
if (op->dest.type == OP_DEST_PUSHF) {
@@ -3238,36 +3273,41 @@ static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
cfi1->cfa.base, cfi1->cfa.offset,
cfi2->cfa.base, cfi2->cfa.offset);
+ return false;
- } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
+ }
+
+ if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
for (i = 0; i < CFI_NUM_REGS; i++) {
- if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
- sizeof(struct cfi_reg)))
+
+ if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg)))
continue;
WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
i, cfi1->regs[i].base, cfi1->regs[i].offset,
i, cfi2->regs[i].base, cfi2->regs[i].offset);
- break;
}
+ return false;
+ }
- } else if (cfi1->type != cfi2->type) {
+ if (cfi1->type != cfi2->type) {
WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
cfi1->type, cfi2->type);
+ return false;
+ }
- } else if (cfi1->drap != cfi2->drap ||
+ if (cfi1->drap != cfi2->drap ||
(cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
(cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
+ return false;
+ }
- } else
- return true;
-
- return false;
+ return true;
}
static inline bool func_uaccess_safe(struct symbol *func)
@@ -3480,6 +3520,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
u8 visited;
int ret;
+ if (func && func->ignore)
+ return 0;
+
sec = insn->sec;
while (1) {
@@ -3491,13 +3534,13 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
!strncmp(func->name, "__pfx_", 6))
return 0;
+ if (file->ignore_unreachables)
+ return 0;
+
WARN("%s() falls through to next function %s()",
func->name, insn_func(insn)->name);
- return 1;
- }
+ func->warned = 1;
- if (func && insn->ignore) {
- WARN_INSN(insn, "BUG: why am I validating an ignored function?");
return 1;
}
@@ -3572,24 +3615,19 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
if (propagate_alt_cfi(file, insn))
return 1;
- if (!insn->ignore_alts && insn->alts) {
- bool skip_orig = false;
-
+ if (insn->alts) {
for (alt = insn->alts; alt; alt = alt->next) {
- if (alt->skip_orig)
- skip_orig = true;
-
ret = validate_branch(file, func, alt->insn, state);
if (ret) {
BT_INSN(insn, "(alt)");
return ret;
}
}
-
- if (skip_orig)
- return 0;
}
+ if (insn->alt_group && insn->alt_group->ignore)
+ return 0;
+
if (handle_insn_ops(insn, next_insn, &state))
return 1;
@@ -3610,9 +3648,6 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
return 1;
}
- if (insn->dead_end)
- return 0;
-
break;
case INSN_JUMP_CONDITIONAL:
@@ -3660,6 +3695,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
return 0;
case INSN_STAC:
+ if (!opts.uaccess)
+ break;
+
if (state.uaccess) {
WARN_INSN(insn, "recursive UACCESS enable");
return 1;
@@ -3669,6 +3707,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
break;
case INSN_CLAC:
+ if (!opts.uaccess)
+ break;
+
if (!state.uaccess && func) {
WARN_INSN(insn, "redundant UACCESS disable");
return 1;
@@ -3710,7 +3751,12 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
if (!next_insn) {
if (state.cfi.cfa.base == CFI_UNDEFINED)
return 0;
- WARN("%s: unexpected end of section", sec->name);
+ if (file->ignore_unreachables)
+ return 0;
+
+ WARN("%s%sunexpected end of section %s",
+ func ? func->name : "", func ? "(): " : "",
+ sec->name);
return 1;
}
@@ -3725,7 +3771,7 @@ static int validate_unwind_hint(struct objtool_file *file,
struct instruction *insn,
struct insn_state *state)
{
- if (insn->hint && !insn->visited && !insn->ignore) {
+ if (insn->hint && !insn->visited) {
int ret = validate_branch(file, insn_func(insn), insn, *state);
if (ret)
BT_INSN(insn, "<=== (hint)");
@@ -3776,23 +3822,15 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
insn->visited |= VISITED_UNRET;
- if (!insn->ignore_alts && insn->alts) {
+ if (insn->alts) {
struct alternative *alt;
- bool skip_orig = false;
-
for (alt = insn->alts; alt; alt = alt->next) {
- if (alt->skip_orig)
- skip_orig = true;
-
ret = validate_unret(file, alt->insn);
if (ret) {
BT_INSN(insn, "(alt)");
return ret;
}
}
-
- if (skip_orig)
- return 0;
}
switch (insn->type) {
@@ -3808,7 +3846,7 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
if (!is_sibling_call(insn)) {
if (!insn->jump_dest) {
WARN_INSN(insn, "unresolved jump target after linking?!?");
- return -1;
+ return 1;
}
ret = validate_unret(file, insn->jump_dest);
if (ret) {
@@ -3830,7 +3868,7 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
if (!dest) {
WARN("Unresolved function after linking!?: %s",
insn_call_dest(insn)->name);
- return -1;
+ return 1;
}
ret = validate_unret(file, dest);
@@ -3859,7 +3897,7 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
if (!next) {
WARN_INSN(insn, "teh end!");
- return -1;
+ return 1;
}
insn = next;
}
@@ -3874,18 +3912,13 @@ static int validate_unret(struct objtool_file *file, struct instruction *insn)
static int validate_unrets(struct objtool_file *file)
{
struct instruction *insn;
- int ret, warnings = 0;
+ int warnings = 0;
for_each_insn(file, insn) {
if (!insn->unret)
continue;
- ret = validate_unret(file, insn);
- if (ret < 0) {
- WARN_INSN(insn, "Failed UNRET validation");
- return ret;
- }
- warnings += ret;
+ warnings += validate_unret(file, insn);
}
return warnings;
@@ -3911,13 +3944,13 @@ static int validate_retpoline(struct objtool_file *file)
if (insn->type == INSN_RETURN) {
if (opts.rethunk) {
WARN_INSN(insn, "'naked' return found in MITIGATION_RETHUNK build");
- } else
- continue;
- } else {
- WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
- insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+ warnings++;
+ }
+ continue;
}
+ WARN_INSN(insn, "indirect %s found in MITIGATION_RETPOLINE build",
+ insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
warnings++;
}
@@ -3939,10 +3972,11 @@ static bool is_ubsan_insn(struct instruction *insn)
static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
- int i;
+ struct symbol *func = insn_func(insn);
struct instruction *prev_insn;
+ int i;
- if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
+ if (insn->type == INSN_NOP || insn->type == INSN_TRAP || (func && func->ignore))
return true;
/*
@@ -3961,7 +3995,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
* In this case we'll find a piece of code (whole function) that is not
* covered by a !section symbol. Ignore them.
*/
- if (opts.link && !insn_func(insn)) {
+ if (opts.link && !func) {
int size = find_symbol_hole_containing(insn->sec, insn->offset);
unsigned long end = insn->offset + size;
@@ -3987,19 +4021,17 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
*/
if (insn->jump_dest && insn_func(insn->jump_dest) &&
strstr(insn_func(insn->jump_dest)->name, ".cold")) {
- struct instruction *dest = insn->jump_dest;
- func_for_each_insn(file, insn_func(dest), dest)
- dest->ignore = true;
+ insn_func(insn->jump_dest)->ignore = true;
}
}
return false;
}
- if (!insn_func(insn))
+ if (!func)
return false;
- if (insn_func(insn)->static_call_tramp)
+ if (func->static_call_tramp)
return true;
/*
@@ -4011,7 +4043,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
* It may also insert a UD2 after calling a __noreturn function.
*/
prev_insn = prev_insn_same_sec(file, insn);
- if (prev_insn->dead_end &&
+ if (prev_insn && prev_insn->dead_end &&
(insn->type == INSN_BUG ||
(insn->type == INSN_JUMP_UNCONDITIONAL &&
insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
@@ -4030,7 +4062,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
if (insn->type == INSN_JUMP_UNCONDITIONAL) {
if (insn->jump_dest &&
- insn_func(insn->jump_dest) == insn_func(insn)) {
+ insn_func(insn->jump_dest) == func) {
insn = insn->jump_dest;
continue;
}
@@ -4038,7 +4070,7 @@ static bool ignore_unreachable_insn(struct objtool_file *file, struct instructio
break;
}
- if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
+ if (insn->offset + insn->len >= func->offset + func->len)
break;
insn = next_insn_same_sec(file, insn);
@@ -4130,10 +4162,11 @@ static int validate_symbol(struct objtool_file *file, struct section *sec,
return 0;
insn = find_insn(file, sec, sym->offset);
- if (!insn || insn->ignore || insn->visited)
+ if (!insn || insn->visited)
return 0;
- state->uaccess = sym->uaccess_safe;
+ if (opts.uaccess)
+ state->uaccess = sym->uaccess_safe;
ret = validate_branch(file, insn_func(insn), insn, *state);
if (ret)
@@ -4354,9 +4387,8 @@ static int validate_ibt_data_reloc(struct objtool_file *file,
if (dest->noendbr)
return 0;
- WARN_FUNC("data relocation to !ENDBR: %s",
- reloc->sec->base, reloc_offset(reloc),
- offstr(dest->sec, dest->offset));
+ WARN_FUNC(reloc->sec->base, reloc_offset(reloc),
+ "data relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
return 1;
}
@@ -4484,13 +4516,15 @@ static int validate_reachable_instructions(struct objtool_file *file)
}
/* 'funcs' is a space-separated list of function names */
-static int disas_funcs(const char *funcs)
+static void disas_funcs(const char *funcs)
{
const char *objdump_str, *cross_compile;
int size, ret;
char *cmd;
cross_compile = getenv("CROSS_COMPILE");
+ if (!cross_compile)
+ cross_compile = "";
objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '"
"BEGIN { split(_funcs, funcs); }"
@@ -4517,7 +4551,7 @@ static int disas_funcs(const char *funcs)
size = snprintf(NULL, 0, objdump_str, cross_compile, objname, funcs) + 1;
if (size <= 0) {
WARN("objdump string size calculation failed");
- return -1;
+ return;
}
cmd = malloc(size);
@@ -4527,24 +4561,30 @@ static int disas_funcs(const char *funcs)
ret = system(cmd);
if (ret) {
WARN("disassembly failed: %d", ret);
- return -1;
+ return;
}
-
- return 0;
}
-static int disas_warned_funcs(struct objtool_file *file)
+static void disas_warned_funcs(struct objtool_file *file)
{
struct symbol *sym;
char *funcs = NULL, *tmp;
for_each_sym(file, sym) {
- if (sym->warnings) {
+ if (sym->warned) {
if (!funcs) {
funcs = malloc(strlen(sym->name) + 1);
+ if (!funcs) {
+ ERROR_GLIBC("malloc");
+ return;
+ }
strcpy(funcs, sym->name);
} else {
tmp = malloc(strlen(funcs) + strlen(sym->name) + 2);
+ if (!tmp) {
+ ERROR_GLIBC("malloc");
+ return;
+ }
sprintf(tmp, "%s %s", funcs, sym->name);
free(funcs);
funcs = tmp;
@@ -4554,8 +4594,6 @@ static int disas_warned_funcs(struct objtool_file *file)
if (funcs)
disas_funcs(funcs);
-
- return 0;
}
struct insn_chunk {
@@ -4588,7 +4626,7 @@ static void free_insns(struct objtool_file *file)
int check(struct objtool_file *file)
{
- int ret, warnings = 0;
+ int ret = 0, warnings = 0;
arch_initial_func_cfi_state(&initial_func_cfi);
init_cfi_state(&init_cfi);
@@ -4606,44 +4644,27 @@ int check(struct objtool_file *file)
cfi_hash_add(&func_cfi);
ret = decode_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
-
if (!nr_insns)
goto out;
- if (opts.retpoline) {
- ret = validate_retpoline(file);
- if (ret < 0)
- goto out;
- warnings += ret;
- }
+ if (opts.retpoline)
+ warnings += validate_retpoline(file);
if (opts.stackval || opts.orc || opts.uaccess) {
- ret = validate_functions(file);
- if (ret < 0)
- goto out;
- warnings += ret;
+ int w = 0;
- ret = validate_unwind_hints(file, NULL);
- if (ret < 0)
- goto out;
- warnings += ret;
+ w += validate_functions(file);
+ w += validate_unwind_hints(file, NULL);
+ if (!w)
+ w += validate_reachable_instructions(file);
- if (!warnings) {
- ret = validate_reachable_instructions(file);
- if (ret < 0)
- goto out;
- warnings += ret;
- }
+ warnings += w;
} else if (opts.noinstr) {
- ret = validate_noinstr_sections(file);
- if (ret < 0)
- goto out;
- warnings += ret;
+ warnings += validate_noinstr_sections(file);
}
if (opts.unret) {
@@ -4651,94 +4672,71 @@ int check(struct objtool_file *file)
* Must be after validate_branch() and friends, it plays
* further games with insn->visited.
*/
- ret = validate_unrets(file);
- if (ret < 0)
- goto out;
- warnings += ret;
+ warnings += validate_unrets(file);
}
- if (opts.ibt) {
- ret = validate_ibt(file);
- if (ret < 0)
- goto out;
- warnings += ret;
- }
+ if (opts.ibt)
+ warnings += validate_ibt(file);
- if (opts.sls) {
- ret = validate_sls(file);
- if (ret < 0)
- goto out;
- warnings += ret;
- }
+ if (opts.sls)
+ warnings += validate_sls(file);
if (opts.static_call) {
ret = create_static_call_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.retpoline) {
ret = create_retpoline_sites_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.cfi) {
ret = create_cfi_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.rethunk) {
ret = create_return_sites_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
if (opts.hack_skylake) {
ret = create_direct_call_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
}
if (opts.mcount) {
ret = create_mcount_loc_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.prefix) {
ret = add_prefix_symbols(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.ibt) {
ret = create_ibt_endbr_seal_sections(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
if (opts.orc && nr_insns) {
ret = orc_create(file);
- if (ret < 0)
+ if (ret)
goto out;
- warnings += ret;
}
free_insns(file);
- if (opts.verbose)
- disas_warned_funcs(file);
-
if (opts.stats) {
printf("nr_insns_visited: %ld\n", nr_insns_visited);
printf("nr_cfi: %ld\n", nr_cfi);
@@ -4747,19 +4745,18 @@ int check(struct objtool_file *file)
}
out:
- /*
- * CONFIG_OBJTOOL_WERROR upgrades all warnings (and errors) to actual
- * errors.
- *
- * Note that even "fatal" type errors don't actually return an error
- * without CONFIG_OBJTOOL_WERROR. That probably needs improved at some
- * point.
- */
- if (opts.werror && (ret || warnings)) {
- if (warnings)
+ if (!ret && !warnings)
+ return 0;
+
+ if (opts.werror && warnings)
+ ret = 1;
+
+ if (opts.verbose) {
+ if (opts.werror && warnings)
WARN("%d warning(s) upgraded to errors", warnings);
- return 1;
+ print_args();
+ disas_warned_funcs(file);
}
- return 0;
+ return ret;
}
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index be4f4b62730c..727a3a4fd9d7 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -72,17 +72,17 @@ static inline void __elf_hash_del(struct elf_hash_node *node,
obj; \
obj = elf_list_entry(obj->member.next, typeof(*(obj)), member))
-#define elf_alloc_hash(name, size) \
-({ \
- __elf_bits(name) = max(10, ilog2(size)); \
+#define elf_alloc_hash(name, size) \
+({ \
+ __elf_bits(name) = max(10, ilog2(size)); \
__elf_table(name) = mmap(NULL, sizeof(struct elf_hash_node *) << __elf_bits(name), \
- PROT_READ|PROT_WRITE, \
- MAP_PRIVATE|MAP_ANON, -1, 0); \
- if (__elf_table(name) == (void *)-1L) { \
- WARN("mmap fail " #name); \
- __elf_table(name) = NULL; \
- } \
- __elf_table(name); \
+ PROT_READ|PROT_WRITE, \
+ MAP_PRIVATE|MAP_ANON, -1, 0); \
+ if (__elf_table(name) == (void *)-1L) { \
+ ERROR_GLIBC("mmap fail " #name); \
+ __elf_table(name) = NULL; \
+ } \
+ __elf_table(name); \
})
static inline unsigned long __sym_start(struct symbol *s)
@@ -316,12 +316,12 @@ static int read_sections(struct elf *elf)
int i;
if (elf_getshdrnum(elf->elf, &sections_nr)) {
- WARN_ELF("elf_getshdrnum");
+ ERROR_ELF("elf_getshdrnum");
return -1;
}
if (elf_getshdrstrndx(elf->elf, &shstrndx)) {
- WARN_ELF("elf_getshdrstrndx");
+ ERROR_ELF("elf_getshdrstrndx");
return -1;
}
@@ -331,7 +331,7 @@ static int read_sections(struct elf *elf)
elf->section_data = calloc(sections_nr, sizeof(*sec));
if (!elf->section_data) {
- perror("calloc");
+ ERROR_GLIBC("calloc");
return -1;
}
for (i = 0; i < sections_nr; i++) {
@@ -341,33 +341,32 @@ static int read_sections(struct elf *elf)
s = elf_getscn(elf->elf, i);
if (!s) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
sec->idx = elf_ndxscn(s);
if (!gelf_getshdr(s, &sec->sh)) {
- WARN_ELF("gelf_getshdr");
+ ERROR_ELF("gelf_getshdr");
return -1;
}
sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name);
if (!sec->name) {
- WARN_ELF("elf_strptr");
+ ERROR_ELF("elf_strptr");
return -1;
}
if (sec->sh.sh_size != 0 && !is_dwarf_section(sec)) {
sec->data = elf_getdata(s, NULL);
if (!sec->data) {
- WARN_ELF("elf_getdata");
+ ERROR_ELF("elf_getdata");
return -1;
}
if (sec->data->d_off != 0 ||
sec->data->d_size != sec->sh.sh_size) {
- WARN("unexpected data attributes for %s",
- sec->name);
+ ERROR("unexpected data attributes for %s", sec->name);
return -1;
}
}
@@ -387,7 +386,7 @@ static int read_sections(struct elf *elf)
/* sanity check, one more call to elf_nextscn() should return NULL */
if (elf_nextscn(elf->elf, s)) {
- WARN("section entry mismatch");
+ ERROR("section entry mismatch");
return -1;
}
@@ -467,7 +466,7 @@ static int read_symbols(struct elf *elf)
elf->symbol_data = calloc(symbols_nr, sizeof(*sym));
if (!elf->symbol_data) {
- perror("calloc");
+ ERROR_GLIBC("calloc");
return -1;
}
for (i = 0; i < symbols_nr; i++) {
@@ -477,14 +476,14 @@ static int read_symbols(struct elf *elf)
if (!gelf_getsymshndx(symtab->data, shndx_data, i, &sym->sym,
&shndx)) {
- WARN_ELF("gelf_getsymshndx");
+ ERROR_ELF("gelf_getsymshndx");
goto err;
}
sym->name = elf_strptr(elf->elf, symtab->sh.sh_link,
sym->sym.st_name);
if (!sym->name) {
- WARN_ELF("elf_strptr");
+ ERROR_ELF("elf_strptr");
goto err;
}
@@ -496,8 +495,7 @@ static int read_symbols(struct elf *elf)
sym->sec = find_section_by_index(elf, shndx);
if (!sym->sec) {
- WARN("couldn't find section for symbol %s",
- sym->name);
+ ERROR("couldn't find section for symbol %s", sym->name);
goto err;
}
if (GELF_ST_TYPE(sym->sym.st_info) == STT_SECTION) {
@@ -536,8 +534,7 @@ static int read_symbols(struct elf *elf)
pnamelen = coldstr - sym->name;
pname = strndup(sym->name, pnamelen);
if (!pname) {
- WARN("%s(): failed to allocate memory",
- sym->name);
+ ERROR("%s(): failed to allocate memory", sym->name);
return -1;
}
@@ -545,8 +542,7 @@ static int read_symbols(struct elf *elf)
free(pname);
if (!pfunc) {
- WARN("%s(): can't find parent function",
- sym->name);
+ ERROR("%s(): can't find parent function", sym->name);
return -1;
}
@@ -583,7 +579,7 @@ static int elf_update_sym_relocs(struct elf *elf, struct symbol *sym)
{
struct reloc *reloc;
- for (reloc = sym->relocs; reloc; reloc = reloc->sym_next_reloc)
+ for (reloc = sym->relocs; reloc; reloc = sym_next_reloc(reloc))
set_reloc_sym(elf, reloc, reloc->sym->idx);
return 0;
@@ -613,14 +609,14 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
s = elf_getscn(elf->elf, symtab->idx);
if (!s) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
if (symtab_shndx) {
t = elf_getscn(elf->elf, symtab_shndx->idx);
if (!t) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
}
@@ -643,7 +639,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
if (idx) {
/* we don't do holes in symbol tables */
- WARN("index out of range");
+ ERROR("index out of range");
return -1;
}
@@ -654,7 +650,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
buf = calloc(num, entsize);
if (!buf) {
- WARN("malloc");
+ ERROR_GLIBC("calloc");
return -1;
}
@@ -669,7 +665,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
if (t) {
buf = calloc(num, sizeof(Elf32_Word));
if (!buf) {
- WARN("malloc");
+ ERROR_GLIBC("calloc");
return -1;
}
@@ -687,7 +683,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
/* empty blocks should not happen */
if (!symtab_data->d_size) {
- WARN("zero size data");
+ ERROR("zero size data");
return -1;
}
@@ -702,7 +698,7 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
/* something went side-ways */
if (idx < 0) {
- WARN("negative index");
+ ERROR("negative index");
return -1;
}
@@ -714,13 +710,13 @@ static int elf_update_symbol(struct elf *elf, struct section *symtab,
} else {
sym->sym.st_shndx = SHN_XINDEX;
if (!shndx_data) {
- WARN("no .symtab_shndx");
+ ERROR("no .symtab_shndx");
return -1;
}
}
if (!gelf_update_symshndx(symtab_data, shndx_data, idx, &sym->sym, shndx)) {
- WARN_ELF("gelf_update_symshndx");
+ ERROR_ELF("gelf_update_symshndx");
return -1;
}
@@ -738,7 +734,7 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym)
if (symtab) {
symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
} else {
- WARN("no .symtab");
+ ERROR("no .symtab");
return NULL;
}
@@ -760,7 +756,7 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym)
old->idx = new_idx;
if (elf_update_symbol(elf, symtab, symtab_shndx, old)) {
- WARN("elf_update_symbol move");
+ ERROR("elf_update_symbol move");
return NULL;
}
@@ -778,7 +774,7 @@ __elf_create_symbol(struct elf *elf, struct symbol *sym)
non_local:
sym->idx = new_idx;
if (elf_update_symbol(elf, symtab, symtab_shndx, sym)) {
- WARN("elf_update_symbol");
+ ERROR("elf_update_symbol");
return NULL;
}
@@ -799,7 +795,7 @@ elf_create_section_symbol(struct elf *elf, struct section *sec)
struct symbol *sym = calloc(1, sizeof(*sym));
if (!sym) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
@@ -829,7 +825,7 @@ elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size)
char *name = malloc(namelen);
if (!sym || !name) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
@@ -858,16 +854,16 @@ static struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec,
struct reloc *reloc, empty = { 0 };
if (reloc_idx >= sec_num_entries(rsec)) {
- WARN("%s: bad reloc_idx %u for %s with %d relocs",
- __func__, reloc_idx, rsec->name, sec_num_entries(rsec));
+ ERROR("%s: bad reloc_idx %u for %s with %d relocs",
+ __func__, reloc_idx, rsec->name, sec_num_entries(rsec));
return NULL;
}
reloc = &rsec->relocs[reloc_idx];
if (memcmp(reloc, &empty, sizeof(empty))) {
- WARN("%s: %s: reloc %d already initialized!",
- __func__, rsec->name, reloc_idx);
+ ERROR("%s: %s: reloc %d already initialized!",
+ __func__, rsec->name, reloc_idx);
return NULL;
}
@@ -880,7 +876,7 @@ static struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec,
set_reloc_addend(elf, reloc, addend);
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
- reloc->sym_next_reloc = sym->relocs;
+ set_sym_next_reloc(reloc, sym->relocs);
sym->relocs = reloc;
return reloc;
@@ -896,8 +892,7 @@ struct reloc *elf_init_reloc_text_sym(struct elf *elf, struct section *sec,
int addend = insn_off;
if (!(insn_sec->sh.sh_flags & SHF_EXECINSTR)) {
- WARN("bad call to %s() for data symbol %s",
- __func__, sym->name);
+ ERROR("bad call to %s() for data symbol %s", __func__, sym->name);
return NULL;
}
@@ -926,8 +921,7 @@ struct reloc *elf_init_reloc_data_sym(struct elf *elf, struct section *sec,
s64 addend)
{
if (sym->sec && (sec->sh.sh_flags & SHF_EXECINSTR)) {
- WARN("bad call to %s() for text symbol %s",
- __func__, sym->name);
+ ERROR("bad call to %s() for text symbol %s", __func__, sym->name);
return NULL;
}
@@ -953,8 +947,7 @@ static int read_relocs(struct elf *elf)
rsec->base = find_section_by_index(elf, rsec->sh.sh_info);
if (!rsec->base) {
- WARN("can't find base section for reloc section %s",
- rsec->name);
+ ERROR("can't find base section for reloc section %s", rsec->name);
return -1;
}
@@ -963,7 +956,7 @@ static int read_relocs(struct elf *elf)
nr_reloc = 0;
rsec->relocs = calloc(sec_num_entries(rsec), sizeof(*reloc));
if (!rsec->relocs) {
- perror("calloc");
+ ERROR_GLIBC("calloc");
return -1;
}
for (i = 0; i < sec_num_entries(rsec); i++) {
@@ -973,13 +966,12 @@ static int read_relocs(struct elf *elf)
symndx = reloc_sym(reloc);
reloc->sym = sym = find_symbol_by_index(elf, symndx);
if (!reloc->sym) {
- WARN("can't find reloc entry symbol %d for %s",
- symndx, rsec->name);
+ ERROR("can't find reloc entry symbol %d for %s", symndx, rsec->name);
return -1;
}
elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
- reloc->sym_next_reloc = sym->relocs;
+ set_sym_next_reloc(reloc, sym->relocs);
sym->relocs = reloc;
nr_reloc++;
@@ -1005,7 +997,7 @@ struct elf *elf_open_read(const char *name, int flags)
elf = malloc(sizeof(*elf));
if (!elf) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
memset(elf, 0, sizeof(*elf));
@@ -1028,12 +1020,12 @@ struct elf *elf_open_read(const char *name, int flags)
elf->elf = elf_begin(elf->fd, cmd, NULL);
if (!elf->elf) {
- WARN_ELF("elf_begin");
+ ERROR_ELF("elf_begin");
goto err;
}
if (!gelf_getehdr(elf->elf, &elf->ehdr)) {
- WARN_ELF("gelf_getehdr");
+ ERROR_ELF("gelf_getehdr");
goto err;
}
@@ -1062,19 +1054,19 @@ static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
if (!strtab)
strtab = find_section_by_name(elf, ".strtab");
if (!strtab) {
- WARN("can't find .strtab section");
+ ERROR("can't find .strtab section");
return -1;
}
s = elf_getscn(elf->elf, strtab->idx);
if (!s) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
data = elf_newdata(s);
if (!data) {
- WARN_ELF("elf_newdata");
+ ERROR_ELF("elf_newdata");
return -1;
}
@@ -1099,7 +1091,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec = malloc(sizeof(*sec));
if (!sec) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
memset(sec, 0, sizeof(*sec));
@@ -1108,13 +1100,13 @@ struct section *elf_create_section(struct elf *elf, const char *name,
s = elf_newscn(elf->elf);
if (!s) {
- WARN_ELF("elf_newscn");
+ ERROR_ELF("elf_newscn");
return NULL;
}
sec->name = strdup(name);
if (!sec->name) {
- perror("strdup");
+ ERROR_GLIBC("strdup");
return NULL;
}
@@ -1122,7 +1114,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec->data = elf_newdata(s);
if (!sec->data) {
- WARN_ELF("elf_newdata");
+ ERROR_ELF("elf_newdata");
return NULL;
}
@@ -1132,14 +1124,14 @@ struct section *elf_create_section(struct elf *elf, const char *name,
if (size) {
sec->data->d_buf = malloc(size);
if (!sec->data->d_buf) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
memset(sec->data->d_buf, 0, size);
}
if (!gelf_getshdr(s, &sec->sh)) {
- WARN_ELF("gelf_getshdr");
+ ERROR_ELF("gelf_getshdr");
return NULL;
}
@@ -1154,7 +1146,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
if (!shstrtab)
shstrtab = find_section_by_name(elf, ".strtab");
if (!shstrtab) {
- WARN("can't find .shstrtab or .strtab section");
+ ERROR("can't find .shstrtab or .strtab section");
return NULL;
}
sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name);
@@ -1179,7 +1171,7 @@ static struct section *elf_create_rela_section(struct elf *elf,
rsec_name = malloc(strlen(sec->name) + strlen(".rela") + 1);
if (!rsec_name) {
- perror("malloc");
+ ERROR_GLIBC("malloc");
return NULL;
}
strcpy(rsec_name, ".rela");
@@ -1199,7 +1191,7 @@ static struct section *elf_create_rela_section(struct elf *elf,
rsec->relocs = calloc(sec_num_entries(rsec), sizeof(struct reloc));
if (!rsec->relocs) {
- perror("calloc");
+ ERROR_GLIBC("calloc");
return NULL;
}
@@ -1232,7 +1224,7 @@ int elf_write_insn(struct elf *elf, struct section *sec,
Elf_Data *data = sec->data;
if (data->d_type != ELF_T_BYTE || data->d_off) {
- WARN("write to unexpected data for section: %s", sec->name);
+ ERROR("write to unexpected data for section: %s", sec->name);
return -1;
}
@@ -1261,7 +1253,7 @@ static int elf_truncate_section(struct elf *elf, struct section *sec)
s = elf_getscn(elf->elf, sec->idx);
if (!s) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
@@ -1271,7 +1263,7 @@ static int elf_truncate_section(struct elf *elf, struct section *sec)
if (!data) {
if (size) {
- WARN("end of section data but non-zero size left\n");
+ ERROR("end of section data but non-zero size left\n");
return -1;
}
return 0;
@@ -1279,12 +1271,12 @@ static int elf_truncate_section(struct elf *elf, struct section *sec)
if (truncated) {
/* when we remove symbols */
- WARN("truncated; but more data\n");
+ ERROR("truncated; but more data\n");
return -1;
}
if (!data->d_size) {
- WARN("zero size data");
+ ERROR("zero size data");
return -1;
}
@@ -1310,13 +1302,13 @@ int elf_write(struct elf *elf)
if (sec_changed(sec)) {
s = elf_getscn(elf->elf, sec->idx);
if (!s) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
/* Note this also flags the section dirty */
if (!gelf_update_shdr(s, &sec->sh)) {
- WARN_ELF("gelf_update_shdr");
+ ERROR_ELF("gelf_update_shdr");
return -1;
}
@@ -1329,7 +1321,7 @@ int elf_write(struct elf *elf)
/* Write all changes to the file. */
if (elf_update(elf->elf, ELF_C_WRITE) < 0) {
- WARN_ELF("elf_update");
+ ERROR_ELF("elf_update");
return -1;
}
diff --git a/tools/objtool/include/objtool/builtin.h b/tools/objtool/include/objtool/builtin.h
index 0fafd0f7a209..6b08666fa69d 100644
--- a/tools/objtool/include/objtool/builtin.h
+++ b/tools/objtool/include/objtool/builtin.h
@@ -43,8 +43,10 @@ struct opts {
extern struct opts opts;
-extern int cmd_parse_options(int argc, const char **argv, const char * const usage[]);
+int cmd_parse_options(int argc, const char **argv, const char * const usage[]);
-extern int objtool_run(int argc, const char **argv);
+int objtool_run(int argc, const char **argv);
+
+void print_args(void);
#endif /* _BUILTIN_H */
diff --git a/tools/objtool/include/objtool/check.h b/tools/objtool/include/objtool/check.h
index e1cd13cd28a3..00fb745e7233 100644
--- a/tools/objtool/include/objtool/check.h
+++ b/tools/objtool/include/objtool/check.h
@@ -34,6 +34,8 @@ struct alt_group {
* This is shared with the other alt_groups in the same alternative.
*/
struct cfi_state **cfi;
+
+ bool ignore;
};
#define INSN_CHUNK_BITS 8
@@ -54,7 +56,6 @@ struct instruction {
u32 idx : INSN_CHUNK_BITS,
dead_end : 1,
- ignore : 1,
ignore_alts : 1,
hint : 1,
save : 1,
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index 223ac1c24b90..c7c4e87ebe88 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -65,10 +65,11 @@ struct symbol {
u8 return_thunk : 1;
u8 fentry : 1;
u8 profiling_func : 1;
+ u8 warned : 1;
u8 embedded_insn : 1;
u8 local_label : 1;
u8 frame_pointer : 1;
- u8 warnings : 2;
+ u8 ignore : 1;
struct list_head pv_target;
struct reloc *relocs;
};
@@ -77,7 +78,7 @@ struct reloc {
struct elf_hash_node hash;
struct section *sec;
struct symbol *sym;
- struct reloc *sym_next_reloc;
+ unsigned long _sym_next_reloc;
};
struct elf {
@@ -297,6 +298,31 @@ static inline void set_reloc_type(struct elf *elf, struct reloc *reloc, unsigned
mark_sec_changed(elf, reloc->sec, true);
}
+#define RELOC_JUMP_TABLE_BIT 1UL
+
+/* Does reloc mark the beginning of a jump table? */
+static inline bool is_jump_table(struct reloc *reloc)
+{
+ return reloc->_sym_next_reloc & RELOC_JUMP_TABLE_BIT;
+}
+
+static inline void set_jump_table(struct reloc *reloc)
+{
+ reloc->_sym_next_reloc |= RELOC_JUMP_TABLE_BIT;
+}
+
+static inline struct reloc *sym_next_reloc(struct reloc *reloc)
+{
+ return (struct reloc *)(reloc->_sym_next_reloc & ~RELOC_JUMP_TABLE_BIT);
+}
+
+static inline void set_sym_next_reloc(struct reloc *reloc, struct reloc *next)
+{
+ unsigned long bit = reloc->_sym_next_reloc & RELOC_JUMP_TABLE_BIT;
+
+ reloc->_sym_next_reloc = (unsigned long)next | bit;
+}
+
#define for_each_sec(file, sec) \
list_for_each_entry(sec, &file->elf->sections, list)
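
The _sym_next_reloc accessors added above rely on pointer tagging: because struct reloc objects are at least 2-byte aligned, the low bit of the next-reloc pointer is always zero and can carry the "is jump table" flag without growing the struct. As a minimal sketch of the same scheme, assuming only that alignment property and using an invented struct node type (not objtool code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define FLAG_BIT 1UL

struct node {
	uintptr_t _next;	/* low bit: flag, remaining bits: next pointer */
};

static bool node_flag(struct node *n)
{
	return n->_next & FLAG_BIT;
}

static void node_set_flag(struct node *n)
{
	n->_next |= FLAG_BIT;
}

static struct node *node_next(struct node *n)
{
	return (struct node *)(n->_next & ~FLAG_BIT);
}

static void node_set_next(struct node *n, struct node *next)
{
	/* preserve the flag bit, replace only the pointer bits */
	n->_next = (uintptr_t)next | (n->_next & FLAG_BIT);
}

int main(void)
{
	struct node a = { 0 }, b = { 0 };

	node_set_next(&a, &b);
	node_set_flag(&a);
	assert(node_next(&a) == &b);
	assert(node_flag(&a));
	return 0;
}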
diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h
index 94a33ee7b363..c0dc86a78ff6 100644
--- a/tools/objtool/include/objtool/objtool.h
+++ b/tools/objtool/include/objtool/objtool.h
@@ -41,7 +41,7 @@ struct objtool_file {
struct objtool_file *objtool_open_read(const char *_objname);
-void objtool_pv_add(struct objtool_file *file, int idx, struct symbol *func);
+int objtool_pv_add(struct objtool_file *file, int idx, struct symbol *func);
int check(struct objtool_file *file);
int orc_dump(const char *objname);
diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h
index e049679bb17b..72d09c0adf1a 100644
--- a/tools/objtool/include/objtool/special.h
+++ b/tools/objtool/include/objtool/special.h
@@ -16,8 +16,6 @@ struct special_alt {
struct list_head list;
bool group;
- bool skip_orig;
- bool skip_alt;
bool jump_or_nop;
u8 key_addend;
@@ -32,7 +30,7 @@ struct special_alt {
int special_get_alts(struct elf *elf, struct list_head *alts);
-void arch_handle_alternative(unsigned short feature, struct special_alt *alt);
+void arch_handle_alternative(struct special_alt *alt);
bool arch_support_alt_relocation(struct special_alt *special_alt,
struct instruction *insn,
diff --git a/tools/objtool/include/objtool/warn.h b/tools/objtool/include/objtool/warn.h
index e72b9d630551..cb8fe846d9dd 100644
--- a/tools/objtool/include/objtool/warn.h
+++ b/tools/objtool/include/objtool/warn.h
@@ -11,6 +11,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
+#include <errno.h>
#include <objtool/builtin.h>
#include <objtool/elf.h>
@@ -41,36 +42,46 @@ static inline char *offstr(struct section *sec, unsigned long offset)
return str;
}
-#define WARN(format, ...) \
- fprintf(stderr, \
- "%s: %s: objtool: " format "\n", \
- objname, \
- opts.werror ? "error" : "warning", \
+#define ___WARN(severity, extra, format, ...) \
+ fprintf(stderr, \
+ "%s%s%s: objtool" extra ": " format "\n", \
+ objname ?: "", \
+ objname ? ": " : "", \
+ severity, \
##__VA_ARGS__)
-#define WARN_FUNC(format, sec, offset, ...) \
-({ \
- char *_str = offstr(sec, offset); \
- WARN("%s: " format, _str, ##__VA_ARGS__); \
- free(_str); \
+#define __WARN(severity, format, ...) \
+ ___WARN(severity, "", format, ##__VA_ARGS__)
+
+#define __WARN_LINE(severity, format, ...) \
+ ___WARN(severity, " [%s:%d]", format, __FILE__, __LINE__, ##__VA_ARGS__)
+
+#define __WARN_ELF(severity, format, ...) \
+ __WARN_LINE(severity, "%s: " format " failed: %s", __func__, ##__VA_ARGS__, elf_errmsg(-1))
+
+#define __WARN_GLIBC(severity, format, ...) \
+ __WARN_LINE(severity, "%s: " format " failed: %s", __func__, ##__VA_ARGS__, strerror(errno))
+
+#define __WARN_FUNC(severity, sec, offset, format, ...) \
+({ \
+ char *_str = offstr(sec, offset); \
+ __WARN(severity, "%s: " format, _str, ##__VA_ARGS__); \
+ free(_str); \
})
-#define WARN_LIMIT 2
+#define WARN_STR (opts.werror ? "error" : "warning")
+
+#define WARN(format, ...) __WARN(WARN_STR, format, ##__VA_ARGS__)
+#define WARN_FUNC(sec, offset, format, ...) __WARN_FUNC(WARN_STR, sec, offset, format, ##__VA_ARGS__)
#define WARN_INSN(insn, format, ...) \
({ \
struct instruction *_insn = (insn); \
- BUILD_BUG_ON(WARN_LIMIT > 2); \
- if (!_insn->sym || _insn->sym->warnings < WARN_LIMIT) { \
- WARN_FUNC(format, _insn->sec, _insn->offset, \
+ if (!_insn->sym || !_insn->sym->warned) \
+ WARN_FUNC(_insn->sec, _insn->offset, format, \
##__VA_ARGS__); \
- if (_insn->sym) \
- _insn->sym->warnings++; \
- } else if (_insn->sym && _insn->sym->warnings == WARN_LIMIT) { \
- WARN_FUNC("skipping duplicate warning(s)", \
- _insn->sec, _insn->offset); \
- _insn->sym->warnings++; \
- } \
+ if (_insn->sym) \
+ _insn->sym->warned = 1; \
})
#define BT_INSN(insn, format, ...) \
@@ -83,7 +94,12 @@ static inline char *offstr(struct section *sec, unsigned long offset)
} \
})
-#define WARN_ELF(format, ...) \
- WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))
+#define ERROR_STR "error"
+
+#define ERROR(format, ...) __WARN(ERROR_STR, format, ##__VA_ARGS__)
+#define ERROR_ELF(format, ...) __WARN_ELF(ERROR_STR, format, ##__VA_ARGS__)
+#define ERROR_GLIBC(format, ...) __WARN_GLIBC(ERROR_STR, format, ##__VA_ARGS__)
+#define ERROR_FUNC(sec, offset, format, ...) __WARN_FUNC(ERROR_STR, sec, offset, format, ##__VA_ARGS__)
+#define ERROR_INSN(insn, format, ...) ERROR_FUNC(insn->sec, insn->offset, format, ##__VA_ARGS__)
#endif /* _WARN_H */
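
To make the rewritten warning macros easier to read outside the diff context, here is a small userspace sketch of the output format they aim for; it hardcodes objname, drops the objname ?: "" handling, and only approximates the objtool macros rather than reproducing them:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static const char *objname = "vmlinux.o";

#define __WARN(severity, extra, fmt, ...)				\
	fprintf(stderr, "%s: %s: objtool" extra ": " fmt "\n",		\
		objname, severity, ##__VA_ARGS__)

#define WARN(fmt, ...)	__WARN("warning", "", fmt, ##__VA_ARGS__)

#define ERROR_GLIBC(fmt, ...)						\
	__WARN("error", " [%s:%d]", "%s: " fmt " failed: %s",		\
	       __FILE__, __LINE__, __func__, ##__VA_ARGS__, strerror(errno))

int main(void)
{
	/* vmlinux.o: warning: objtool: unreachable instruction */
	WARN("unreachable instruction");

	/* vmlinux.o: error: objtool [<file>:<line>]: main: calloc failed: Cannot allocate memory */
	errno = ENOMEM;
	ERROR_GLIBC("calloc");
	return 0;
}

Note how the " failed: %s" suffix comes from the macro itself, which is why call sites pass just the libc function name (e.g. ERROR_GLIBC("calloc")) rather than a full sentence.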
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 1c73fb62fd57..5c8b974ad0f9 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -23,7 +23,7 @@ static struct objtool_file file;
struct objtool_file *objtool_open_read(const char *filename)
{
if (file.elf) {
- WARN("won't handle more than one file at a time");
+ ERROR("won't handle more than one file at a time");
return NULL;
}
@@ -44,14 +44,14 @@ struct objtool_file *objtool_open_read(const char *filename)
return &file;
}
-void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
+int objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
{
if (!opts.noinstr)
- return;
+ return 0;
if (!f->pv_ops) {
- WARN("paravirt confusion");
- return;
+ ERROR("paravirt confusion");
+ return -1;
}
/*
@@ -60,14 +60,15 @@ void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
*/
if (!strcmp(func->name, "_paravirt_nop") ||
!strcmp(func->name, "_paravirt_ident_64"))
- return;
+ return 0;
/* already added this function */
if (!list_empty(&func->pv_target))
- return;
+ return 0;
list_add(&func->pv_target, &f->pv_ops[idx].targets);
f->pv_ops[idx].clean = false;
+ return 0;
}
int main(int argc, const char **argv)
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index 05ef0e297837..1dd9fc18fe62 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -36,47 +36,47 @@ int orc_dump(const char *filename)
elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
if (!elf) {
- WARN_ELF("elf_begin");
+ ERROR_ELF("elf_begin");
return -1;
}
if (!elf64_getehdr(elf)) {
- WARN_ELF("elf64_getehdr");
+ ERROR_ELF("elf64_getehdr");
return -1;
}
memcpy(&dummy_elf.ehdr, elf64_getehdr(elf), sizeof(dummy_elf.ehdr));
if (elf_getshdrnum(elf, &nr_sections)) {
- WARN_ELF("elf_getshdrnum");
+ ERROR_ELF("elf_getshdrnum");
return -1;
}
if (elf_getshdrstrndx(elf, &shstrtab_idx)) {
- WARN_ELF("elf_getshdrstrndx");
+ ERROR_ELF("elf_getshdrstrndx");
return -1;
}
for (i = 0; i < nr_sections; i++) {
scn = elf_getscn(elf, i);
if (!scn) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
if (!gelf_getshdr(scn, &sh)) {
- WARN_ELF("gelf_getshdr");
+ ERROR_ELF("gelf_getshdr");
return -1;
}
name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
if (!name) {
- WARN_ELF("elf_strptr");
+ ERROR_ELF("elf_strptr");
return -1;
}
data = elf_getdata(scn, NULL);
if (!data) {
- WARN_ELF("elf_getdata");
+ ERROR_ELF("elf_getdata");
return -1;
}
@@ -99,7 +99,7 @@ int orc_dump(const char *filename)
return 0;
if (orc_size % sizeof(*orc) != 0) {
- WARN("bad .orc_unwind section size");
+ ERROR("bad .orc_unwind section size");
return -1;
}
@@ -107,36 +107,36 @@ int orc_dump(const char *filename)
for (i = 0; i < nr_entries; i++) {
if (rela_orc_ip) {
if (!gelf_getrela(rela_orc_ip, i, &rela)) {
- WARN_ELF("gelf_getrela");
+ ERROR_ELF("gelf_getrela");
return -1;
}
if (!gelf_getsym(symtab, GELF_R_SYM(rela.r_info), &sym)) {
- WARN_ELF("gelf_getsym");
+ ERROR_ELF("gelf_getsym");
return -1;
}
if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) {
scn = elf_getscn(elf, sym.st_shndx);
if (!scn) {
- WARN_ELF("elf_getscn");
+ ERROR_ELF("elf_getscn");
return -1;
}
if (!gelf_getshdr(scn, &sh)) {
- WARN_ELF("gelf_getshdr");
+ ERROR_ELF("gelf_getshdr");
return -1;
}
name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
if (!name) {
- WARN_ELF("elf_strptr");
+ ERROR_ELF("elf_strptr");
return -1;
}
} else {
name = elf_strptr(elf, strtab_idx, sym.st_name);
if (!name) {
- WARN_ELF("elf_strptr");
+ ERROR_ELF("elf_strptr");
return -1;
}
}
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 097a69db82a0..c80fed8a840e 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -54,7 +54,7 @@ static const struct special_entry entries[] = {
{},
};
-void __weak arch_handle_alternative(unsigned short feature, struct special_alt *alt)
+void __weak arch_handle_alternative(struct special_alt *alt)
{
}
@@ -86,27 +86,18 @@ static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
orig_reloc = find_reloc_by_dest(elf, sec, offset + entry->orig);
if (!orig_reloc) {
- WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
+ ERROR_FUNC(sec, offset + entry->orig, "can't find orig reloc");
return -1;
}
reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
- if (entry->feature) {
- unsigned short feature;
-
- feature = bswap_if_needed(elf,
- *(unsigned short *)(sec->data->d_buf +
- offset +
- entry->feature));
- arch_handle_alternative(feature, alt);
- }
+ arch_handle_alternative(alt);
if (!entry->group || alt->new_len) {
new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
if (!new_reloc) {
- WARN_FUNC("can't find new reloc",
- sec, offset + entry->new);
+ ERROR_FUNC(sec, offset + entry->new, "can't find new reloc");
return -1;
}
@@ -122,8 +113,7 @@ static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
key_reloc = find_reloc_by_dest(elf, sec, offset + entry->key);
if (!key_reloc) {
- WARN_FUNC("can't find key reloc",
- sec, offset + entry->key);
+ ERROR_FUNC(sec, offset + entry->key, "can't find key reloc");
return -1;
}
alt->key_addend = reloc_addend(key_reloc);
@@ -153,8 +143,7 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
continue;
if (sec->sh.sh_size % entry->size != 0) {
- WARN("%s size not a multiple of %d",
- sec->name, entry->size);
+ ERROR("%s size not a multiple of %d", sec->name, entry->size);
return -1;
}
@@ -163,7 +152,7 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
for (idx = 0; idx < nr_entries; idx++) {
alt = malloc(sizeof(*alt));
if (!alt) {
- WARN("malloc failed");
+ ERROR_GLIBC("malloc failed");
return -1;
}
memset(alt, 0, sizeof(*alt));
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index dc4333d23189..8787048c6762 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -586,36 +586,48 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
}
}
-#define READ_ONCE(x) \
-({ \
- union { typeof(x) __val; char __c[1]; } __u = \
- { .__c = { 0 } }; \
- __read_once_size(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
-})
-
-#define WRITE_ONCE(x, val) \
-({ \
- union { typeof(x) __val; char __c[1]; } __u = \
- { .__val = (val) }; \
- __write_once_size(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
-})
-
-#define READ_ONCE_ARENA(type, x) \
-({ \
- union { type __val; char __c[1]; } __u = \
- { .__c = { 0 } }; \
- __read_once_size((void *)&(x), __u.__c, sizeof(x)); \
- __u.__val; \
+/*
+ * __unqual_typeof(x) - Declare an unqualified scalar type, leaving
+ * non-scalar types unchanged.
+ *
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ *
+ * This is copied verbatim from kernel's include/linux/compiler_types.h, but
+ * with default expression (for pointers) changed from (x) to (typeof(x)0).
+ *
+ * This is because LLVM has a bug where for lvalue (x), it does not get rid of
+ * an extra address_space qualifier, but does in case of rvalue (typeof(x)0).
+ * Hence, for pointers, we need to create an rvalue expression to get the
+ * desired type. See https://github.com/llvm/llvm-project/issues/53400.
+ */
+#define __scalar_type_to_expr_cases(type) \
+ unsigned type : (unsigned type)0, signed type : (signed type)0
+
+#define __unqual_typeof(x) \
+ typeof(_Generic((x), \
+ char: (char)0, \
+ __scalar_type_to_expr_cases(char), \
+ __scalar_type_to_expr_cases(short), \
+ __scalar_type_to_expr_cases(int), \
+ __scalar_type_to_expr_cases(long), \
+ __scalar_type_to_expr_cases(long long), \
+ default: (typeof(x))0))
+
+#define READ_ONCE(x) \
+({ \
+ union { __unqual_typeof(x) __val; char __c[1]; } __u = \
+ { .__c = { 0 } }; \
+ __read_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
})
-#define WRITE_ONCE_ARENA(type, x, val) \
-({ \
- union { type __val; char __c[1]; } __u = \
- { .__val = (val) }; \
- __write_once_size((void *)&(x), __u.__c, sizeof(x)); \
- __u.__val; \
+#define WRITE_ONCE(x, val) \
+({ \
+ union { __unqual_typeof(x) __val; char __c[1]; } __u = \
+ { .__val = (val) }; \
+ __write_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
})
/*
@@ -648,6 +660,23 @@ static inline u32 log2_u64(u64 v)
return log2_u32(v) + 1;
}
+/*
+ * Return a value proportionally scaled to the task's weight.
+ */
+static inline u64 scale_by_task_weight(const struct task_struct *p, u64 value)
+{
+ return (value * p->scx.weight) / 100;
+}
+
+/*
+ * Return a value inversely proportional to the task's weight.
+ */
+static inline u64 scale_by_task_weight_inverse(const struct task_struct *p, u64 value)
+{
+ return value * 100 / p->scx.weight;
+}
+
+
#include "compat.bpf.h"
#include "enums.bpf.h"
diff --git a/tools/sched_ext/include/scx/enum_defs.autogen.h b/tools/sched_ext/include/scx/enum_defs.autogen.h
index 6e6c45f14fe1..c2c33df9292c 100644
--- a/tools/sched_ext/include/scx/enum_defs.autogen.h
+++ b/tools/sched_ext/include/scx/enum_defs.autogen.h
@@ -88,6 +88,8 @@
#define HAVE_SCX_OPS_ENQ_LAST
#define HAVE_SCX_OPS_ENQ_EXITING
#define HAVE_SCX_OPS_SWITCH_PARTIAL
+#define HAVE_SCX_OPS_ENQ_MIGRATION_DISABLED
+#define HAVE_SCX_OPS_ALLOW_QUEUED_WAKEUP
#define HAVE_SCX_OPS_HAS_CGROUP_WEIGHT
#define HAVE_SCX_OPS_ALL_FLAGS
#define HAVE_SCX_OPSS_NONE
@@ -104,6 +106,7 @@
#define HAVE_SCX_RQ_BAL_PENDING
#define HAVE_SCX_RQ_BAL_KEEP
#define HAVE_SCX_RQ_BYPASSING
+#define HAVE_SCX_RQ_CLK_VALID
#define HAVE_SCX_RQ_IN_WAKEUP
#define HAVE_SCX_RQ_IN_BALANCE
#define HAVE_SCX_TASK_NONE
diff --git a/tools/sched_ext/include/scx/enums.autogen.bpf.h b/tools/sched_ext/include/scx/enums.autogen.bpf.h
index 0e941a0d6f88..2f8002bcc19a 100644
--- a/tools/sched_ext/include/scx/enums.autogen.bpf.h
+++ b/tools/sched_ext/include/scx/enums.autogen.bpf.h
@@ -13,6 +13,30 @@ const volatile u64 __SCX_SLICE_DFL __weak;
const volatile u64 __SCX_SLICE_INF __weak;
#define SCX_SLICE_INF __SCX_SLICE_INF
+const volatile u64 __SCX_RQ_ONLINE __weak;
+#define SCX_RQ_ONLINE __SCX_RQ_ONLINE
+
+const volatile u64 __SCX_RQ_CAN_STOP_TICK __weak;
+#define SCX_RQ_CAN_STOP_TICK __SCX_RQ_CAN_STOP_TICK
+
+const volatile u64 __SCX_RQ_BAL_PENDING __weak;
+#define SCX_RQ_BAL_PENDING __SCX_RQ_BAL_PENDING
+
+const volatile u64 __SCX_RQ_BAL_KEEP __weak;
+#define SCX_RQ_BAL_KEEP __SCX_RQ_BAL_KEEP
+
+const volatile u64 __SCX_RQ_BYPASSING __weak;
+#define SCX_RQ_BYPASSING __SCX_RQ_BYPASSING
+
+const volatile u64 __SCX_RQ_CLK_VALID __weak;
+#define SCX_RQ_CLK_VALID __SCX_RQ_CLK_VALID
+
+const volatile u64 __SCX_RQ_IN_WAKEUP __weak;
+#define SCX_RQ_IN_WAKEUP __SCX_RQ_IN_WAKEUP
+
+const volatile u64 __SCX_RQ_IN_BALANCE __weak;
+#define SCX_RQ_IN_BALANCE __SCX_RQ_IN_BALANCE
+
const volatile u64 __SCX_DSQ_FLAG_BUILTIN __weak;
#define SCX_DSQ_FLAG_BUILTIN __SCX_DSQ_FLAG_BUILTIN
diff --git a/tools/sched_ext/include/scx/enums.autogen.h b/tools/sched_ext/include/scx/enums.autogen.h
index 88137a140e72..fedec938584b 100644
--- a/tools/sched_ext/include/scx/enums.autogen.h
+++ b/tools/sched_ext/include/scx/enums.autogen.h
@@ -8,6 +8,14 @@
SCX_ENUM_SET(skel, scx_public_consts, SCX_OPS_NAME_LEN); \
SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_DFL); \
SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_INF); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_ONLINE); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_CAN_STOP_TICK); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BAL_PENDING); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BAL_KEEP); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BYPASSING); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_CLK_VALID); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_IN_WAKEUP); \
+ SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_IN_BALANCE); \
SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_BUILTIN); \
SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_LOCAL_ON); \
SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_INVALID); \
diff --git a/tools/sched_ext/include/scx/enums.h b/tools/sched_ext/include/scx/enums.h
index 34cbebe974b7..8e7c91575f0b 100644
--- a/tools/sched_ext/include/scx/enums.h
+++ b/tools/sched_ext/include/scx/enums.h
@@ -14,7 +14,8 @@ static inline void __ENUM_set(u64 *val, char *type, char *name)
bool res;
res = __COMPAT_read_enum(type, name, val);
- SCX_BUG_ON(!res, "enum not found(%s)", name);
+ if (!res)
+ *val = 0;
}
#define SCX_ENUM_SET(skel, type, name) do { \
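With the SCX_BUG_ON() dropped, an enum the running kernel does not export now reads back as 0 instead of aborting the loader, so userspace can feature-test it. A minimal sketch using __ENUM_set() from this header (the flag name is only an example, and <stdio.h> is assumed to be included by the loader):

u64 clk_valid;

/* falls back to 0 on kernels that do not export scx_rq_flags::SCX_RQ_CLK_VALID */
__ENUM_set(&clk_valid, "scx_rq_flags", "SCX_RQ_CLK_VALID");
if (!clk_valid)
	fprintf(stderr, "SCX_RQ_CLK_VALID not available, skipping rq-clock checks\n");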
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 0a6572ab6f37..387f3df8b988 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -61,8 +61,11 @@ cxl_core-y += $(CXL_CORE_SRC)/pci.o
cxl_core-y += $(CXL_CORE_SRC)/hdm.o
cxl_core-y += $(CXL_CORE_SRC)/pmu.o
cxl_core-y += $(CXL_CORE_SRC)/cdat.o
+cxl_core-y += $(CXL_CORE_SRC)/ras.o
+cxl_core-y += $(CXL_CORE_SRC)/acpi.o
cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
+cxl_core-$(CONFIG_CXL_MCE) += $(CXL_CORE_SRC)/mce.o
cxl_core-$(CONFIG_CXL_FEATURES) += $(CXL_CORE_SRC)/features.o
cxl_core-y += config_check.o
cxl_core-y += cxl_core_test.o
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index cc8948f49117..1c3336095923 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -155,7 +155,7 @@ static struct {
} cfmws7;
struct {
struct acpi_cedt_cfmws cfmws;
- u32 target[4];
+ u32 target[3];
} cfmws8;
struct {
struct acpi_cedt_cxims cxims;
@@ -331,14 +331,14 @@ static struct {
.length = sizeof(mock_cedt.cfmws8),
},
.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
- .interleave_ways = 2,
- .granularity = 0,
+ .interleave_ways = 8,
+ .granularity = 1,
.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
ACPI_CEDT_CFMWS_RESTRICT_PMEM,
.qtg_id = FAKE_QTG_ID,
- .window_size = SZ_256M * 16UL,
+ .window_size = SZ_512M * 6UL,
},
- .target = { 0, 1, 0, 1, },
+ .target = { 0, 1, 2, },
},
.cxims0 = {
.cxims = {
@@ -1000,25 +1000,21 @@ static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
find_cxl_root(port);
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
- struct range pmem_range = {
- .start = cxlds->pmem_res.start,
- .end = cxlds->pmem_res.end,
- };
- struct range ram_range = {
- .start = cxlds->ram_res.start,
- .end = cxlds->ram_res.end,
- };
if (!cxl_root)
return;
- if (range_len(&ram_range))
- dpa_perf_setup(port, &ram_range, &mds->ram_perf);
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ struct resource *res = &cxlds->part[i].res;
+ struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
+ struct range range = {
+ .start = res->start,
+ .end = res->end,
+ };
- if (range_len(&pmem_range))
- dpa_perf_setup(port, &pmem_range, &mds->pmem_perf);
+ dpa_perf_setup(port, &range, perf);
+ }
cxl_memdev_update_perf(cxlmd);
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 9495dbcc03a7..f2957a3e36fe 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -78,6 +78,10 @@ static struct cxl_cel_entry mock_cel[] = {
.effect = CXL_CMD_EFFECT_NONE,
},
{
+ .opcode = cpu_to_le16(CXL_MBOX_OP_SET_SHUTDOWN_STATE),
+ .effect = POLICY_CHANGE_IMMEDIATE,
+ },
+ {
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
.effect = CXL_CMD_EFFECT_NONE,
},
@@ -178,6 +182,7 @@ struct cxl_mockmem_data {
u64 timestamp;
unsigned long sanitize_timeout;
struct vendor_test_feat test_feat;
+ u8 shutdown_state;
};
static struct mock_event_log *event_find_log(struct device *dev, int log_type)
@@ -1105,6 +1110,21 @@ static int mock_health_info(struct cxl_mbox_cmd *cmd)
return 0;
}
+static int mock_set_shutdown_state(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_set_shutdown_state_in *ss = cmd->payload_in;
+
+ if (cmd->size_in != sizeof(*ss))
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ mdata->shutdown_state = ss->state;
+ return 0;
+}
+
static struct mock_poison {
struct cxl_dev_state *cxlds;
u64 dpa;
@@ -1583,6 +1603,9 @@ static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
rc = mock_passphrase_secure_erase(mdata, cmd);
break;
+ case CXL_MBOX_OP_SET_SHUTDOWN_STATE:
+ rc = mock_set_shutdown_state(mdata, cmd);
+ break;
case CXL_MBOX_OP_GET_POISON:
rc = mock_get_poison(cxlds, cmd);
break;
@@ -1670,6 +1693,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
struct cxl_dev_state *cxlds;
struct cxl_mockmem_data *mdata;
struct cxl_mailbox *cxl_mbox;
+ struct cxl_dpa_info range_info = { 0 };
int rc;
mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
@@ -1709,7 +1733,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
- cxlds->serial = pdev->id;
+ cxlds->serial = pdev->id + 1;
if (is_rcd(pdev))
cxlds->rcd = true;
@@ -1730,7 +1754,11 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (rc)
return rc;
- rc = cxl_mem_create_range_info(mds);
+ rc = cxl_mem_dpa_fetch(mds, &range_info);
+ if (rc)
+ return rc;
+
+ rc = cxl_dpa_setup(cxlds, &range_info);
if (rc)
return rc;
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 2694344274bf..c77c8c8e3d9b 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -62,6 +62,7 @@ TARGETS += mount
TARGETS += mount_setattr
TARGETS += move_mount_set_group
TARGETS += mqueue
+TARGETS += mseal_system_mappings
TARGETS += nci
TARGETS += net
TARGETS += net/af_unix
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
index fb07f5773888..7f3c233943b3 100644
--- a/tools/testing/selftests/bpf/progs/test_module_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
@@ -117,7 +117,7 @@ int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
bpf_probe_read_kernel(&buf, 8, ret);
bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
- *(volatile long long *)ret;
+ *(volatile int *)ret;
*(volatile int *)&ret->f_mode;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs_extable.c b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
index e2a21fbd4e44..dcac69f5928a 100644
--- a/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
+++ b/tools/testing/selftests/bpf/progs/test_subprogs_extable.c
@@ -21,7 +21,7 @@ static __u64 test_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret)
{
- *(volatile long *)ret;
+ *(volatile int *)ret;
*(volatile int *)&ret->f_mode;
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
triggered++;
@@ -31,7 +31,7 @@ int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret)
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret)
{
- *(volatile long *)ret;
+ *(volatile int *)ret;
*(volatile int *)&ret->f_mode;
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
triggered++;
@@ -41,7 +41,7 @@ int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret)
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs3, int arg, struct file *ret)
{
- *(volatile long *)ret;
+ *(volatile int *)ret;
*(volatile int *)&ret->f_mode;
bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
triggered++;
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
index a9be6ae49454..c258b0722e04 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
@@ -12,7 +12,7 @@ SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 8")
__xlated("4: r5 = 5")
-__xlated("5: w0 = ")
+__xlated("5: r0 = ")
__xlated("6: r0 = &(void __percpu *)(r0)")
__xlated("7: r0 = *(u32 *)(r0 +0)")
__xlated("8: exit")
@@ -704,7 +704,7 @@ SEC("raw_tp")
__arch_x86_64
__log_level(4) __msg("stack depth 32+0")
__xlated("2: r1 = 1")
-__xlated("3: w0 =")
+__xlated("3: r0 =")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("5: r0 = *(u32 *)(r0 +0)")
/* bpf_loop params setup */
@@ -753,7 +753,7 @@ __arch_x86_64
__log_level(4) __msg("stack depth 40+0")
/* call bpf_get_smp_processor_id */
__xlated("2: r1 = 42")
-__xlated("3: w0 =")
+__xlated("3: r0 =")
__xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("5: r0 = *(u32 *)(r0 +0)")
/* call bpf_get_prandom_u32 */
diff --git a/tools/testing/selftests/bpf/progs/verifier_private_stack.c b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
index b1fbdf119553..fc91b414364e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_private_stack.c
+++ b/tools/testing/selftests/bpf/progs/verifier_private_stack.c
@@ -27,7 +27,7 @@ __description("Private stack, single prog")
__success
__arch_x86_64
__jited(" movabsq $0x{{.*}}, %r9")
-__jited(" addq %gs:0x{{.*}}, %r9")
+__jited(" addq %gs:{{.*}}, %r9")
__jited(" movl $0x2a, %edi")
__jited(" movq %rdi, -0x100(%r9)")
__naked void private_stack_single_prog(void)
@@ -74,7 +74,7 @@ __success
__arch_x86_64
/* private stack fp for the main prog */
__jited(" movabsq $0x{{.*}}, %r9")
-__jited(" addq %gs:0x{{.*}}, %r9")
+__jited(" addq %gs:{{.*}}, %r9")
__jited(" movl $0x2a, %edi")
__jited(" movq %rdi, -0x200(%r9)")
__jited(" pushq %r9")
@@ -122,7 +122,7 @@ __jited(" pushq %rbp")
__jited(" movq %rsp, %rbp")
__jited(" endbr64")
__jited(" movabsq $0x{{.*}}, %r9")
-__jited(" addq %gs:0x{{.*}}, %r9")
+__jited(" addq %gs:{{.*}}, %r9")
__jited(" pushq %r9")
__jited(" callq")
__jited(" popq %r9")
diff --git a/tools/testing/selftests/clone3/clone3_selftests.h b/tools/testing/selftests/clone3/clone3_selftests.h
index 3d2663fe50ba..eeca8005723f 100644
--- a/tools/testing/selftests/clone3/clone3_selftests.h
+++ b/tools/testing/selftests/clone3/clone3_selftests.h
@@ -16,7 +16,7 @@
#define ptr_to_u64(ptr) ((__u64)((uintptr_t)(ptr)))
#ifndef __NR_clone3
-#define __NR_clone3 -1
+#define __NR_clone3 435
#endif
struct __clone_args {
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.sh b/tools/testing/selftests/mm/va_high_addr_switch.sh
index 2c725773cd79..1f92e8caceac 100755
--- a/tools/testing/selftests/mm/va_high_addr_switch.sh
+++ b/tools/testing/selftests/mm/va_high_addr_switch.sh
@@ -41,6 +41,31 @@ check_supported_x86_64()
fi
}
+check_supported_ppc64()
+{
+ local config="/proc/config.gz"
+ [[ -f "${config}" ]] || config="/boot/config-$(uname -r)"
+ [[ -f "${config}" ]] || fail "Cannot find kernel config in /proc or /boot"
+
+ local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2)
+ if [[ "${pg_table_levels}" -lt 5 ]]; then
+ echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test"
+ exit $ksft_skip
+ fi
+
+ local mmu_support=$(grep -m1 "mmu" /proc/cpuinfo | awk '{print $3}')
+ if [[ "$mmu_support" != "radix" ]]; then
+ echo "$0: System does not use Radix MMU, required for 5-level paging"
+ exit $ksft_skip
+ fi
+
+ local hugepages_total=$(awk '/HugePages_Total/ {print $2}' /proc/meminfo)
+ if [[ "${hugepages_total}" -eq 0 ]]; then
+ echo "$0: HugePages are not enabled, required for some tests"
+ exit $ksft_skip
+ fi
+}
+
check_test_requirements()
{
# The test supports x86_64 and powerpc64. We currently have no useful
@@ -50,6 +75,9 @@ check_test_requirements()
"x86_64")
check_supported_x86_64
;;
+ "ppc64le"|"ppc64")
+ check_supported_ppc64
+ ;;
*)
return 0
;;
diff --git a/tools/testing/selftests/mseal_system_mappings/.gitignore b/tools/testing/selftests/mseal_system_mappings/.gitignore
new file mode 100644
index 000000000000..319c497a595e
--- /dev/null
+++ b/tools/testing/selftests/mseal_system_mappings/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+sysmap_is_sealed
diff --git a/tools/testing/selftests/mseal_system_mappings/Makefile b/tools/testing/selftests/mseal_system_mappings/Makefile
new file mode 100644
index 000000000000..2b4504e2f52f
--- /dev/null
+++ b/tools/testing/selftests/mseal_system_mappings/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+CFLAGS += -std=c99 -pthread -Wall $(KHDR_INCLUDES)
+
+TEST_GEN_PROGS := sysmap_is_sealed
+
+include ../lib.mk
diff --git a/tools/testing/selftests/mseal_system_mappings/config b/tools/testing/selftests/mseal_system_mappings/config
new file mode 100644
index 000000000000..675cb9f37b86
--- /dev/null
+++ b/tools/testing/selftests/mseal_system_mappings/config
@@ -0,0 +1 @@
+CONFIG_MSEAL_SYSTEM_MAPPINGS=y
diff --git a/tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c b/tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c
new file mode 100644
index 000000000000..0d2af30c3bf5
--- /dev/null
+++ b/tools/testing/selftests/mseal_system_mappings/sysmap_is_sealed.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * test system mappings are sealed when
+ * CONFIG_MSEAL_SYSTEM_MAPPINGS=y
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdbool.h>
+
+#include "../kselftest.h"
+#include "../kselftest_harness.h"
+
+#define VMFLAGS "VmFlags:"
+#define MSEAL_FLAGS "sl"
+#define MAX_LINE_LEN 512
+
+bool has_mapping(char *name, FILE *maps)
+{
+ char line[MAX_LINE_LEN];
+
+ while (fgets(line, sizeof(line), maps)) {
+ if (strstr(line, name))
+ return true;
+ }
+
+ return false;
+}
+
+bool mapping_is_sealed(char *name, FILE *maps)
+{
+ char line[MAX_LINE_LEN];
+
+ while (fgets(line, sizeof(line), maps)) {
+ if (!strncmp(line, VMFLAGS, strlen(VMFLAGS))) {
+ if (strstr(line, MSEAL_FLAGS))
+ return true;
+
+ return false;
+ }
+ }
+
+ return false;
+}
+
+FIXTURE(basic) {
+ FILE *maps;
+};
+
+FIXTURE_SETUP(basic)
+{
+ self->maps = fopen("/proc/self/smaps", "r");
+ if (!self->maps)
+ SKIP(return, "Could not open /proc/self/smap, errno=%d",
+ errno);
+};
+
+FIXTURE_TEARDOWN(basic)
+{
+ if (self->maps)
+ fclose(self->maps);
+};
+
+FIXTURE_VARIANT(basic)
+{
+ char *name;
+ bool sealed;
+};
+
+FIXTURE_VARIANT_ADD(basic, vdso) {
+ .name = "[vdso]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, vvar) {
+ .name = "[vvar]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, vvar_vclock) {
+ .name = "[vvar_vclock]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, sigpage) {
+ .name = "[sigpage]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, vectors) {
+ .name = "[vectors]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, uprobes) {
+ .name = "[uprobes]",
+ .sealed = true,
+};
+
+FIXTURE_VARIANT_ADD(basic, stack) {
+ .name = "[stack]",
+ .sealed = false,
+};
+
+TEST_F(basic, check_sealed)
+{
+ if (!has_mapping(variant->name, self->maps)) {
+ SKIP(return, "could not find the mapping, %s",
+ variant->name);
+ }
+
+ EXPECT_EQ(variant->sealed,
+ mapping_is_sealed(variant->name, self->maps));
+};
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
index cec22aa11cdf..55bcf81a2b9a 100644
--- a/tools/testing/selftests/pidfd/pidfd.h
+++ b/tools/testing/selftests/pidfd/pidfd.h
@@ -32,19 +32,19 @@
#endif
#ifndef __NR_pidfd_open
-#define __NR_pidfd_open -1
+#define __NR_pidfd_open 434
#endif
#ifndef __NR_pidfd_send_signal
-#define __NR_pidfd_send_signal -1
+#define __NR_pidfd_send_signal 424
#endif
#ifndef __NR_clone3
-#define __NR_clone3 -1
+#define __NR_clone3 435
#endif
#ifndef __NR_pidfd_getfd
-#define __NR_pidfd_getfd -1
+#define __NR_pidfd_getfd 438
#endif
#ifndef PIDFD_NONBLOCK
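The fallback values above (like the matching __NR_clone3 435 in clone3_selftests.h) are the arch-generic syscall numbers, so on toolchains whose headers predate these syscalls the tests now issue the real syscall instead of calling syscall(-1) and failing with ENOSYS. A minimal sketch of how such a fallback is consumed (the wrapper name is illustrative):

#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434	/* same fallback as above */
#endif

/* works even when the libc headers lack __NR_pidfd_open */
static int sys_pidfd_open(pid_t pid, unsigned int flags)
{
	return syscall(__NR_pidfd_open, pid, flags);
}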
diff --git a/tools/testing/selftests/rtc/.gitignore b/tools/testing/selftests/rtc/.gitignore
index fb2d533aa575..a2afe7994e85 100644
--- a/tools/testing/selftests/rtc/.gitignore
+++ b/tools/testing/selftests/rtc/.gitignore
@@ -1,3 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
rtctest
-setdate
diff --git a/tools/testing/selftests/rtc/Makefile b/tools/testing/selftests/rtc/Makefile
index 9dbb395c5c79..547c244a2ca5 100644
--- a/tools/testing/selftests/rtc/Makefile
+++ b/tools/testing/selftests/rtc/Makefile
@@ -4,8 +4,6 @@ LDLIBS += -lrt -lpthread -lm
TEST_GEN_PROGS = rtctest
-TEST_GEN_PROGS_EXTENDED = setdate
-
TEST_FILES := settings
include ../lib.mk
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index e103097d0b5b..be175c0e6ae3 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -29,6 +29,7 @@ enum rtc_alarm_state {
RTC_ALARM_UNKNOWN,
RTC_ALARM_ENABLED,
RTC_ALARM_DISABLED,
+ RTC_ALARM_RES_MINUTE,
};
FIXTURE(rtc) {
@@ -88,7 +89,7 @@ static void nanosleep_with_retries(long ns)
}
}
-static enum rtc_alarm_state get_rtc_alarm_state(int fd)
+static enum rtc_alarm_state get_rtc_alarm_state(int fd, int need_seconds)
{
struct rtc_param param = { 0 };
int rc;
@@ -103,6 +104,10 @@ static enum rtc_alarm_state get_rtc_alarm_state(int fd)
if ((param.uvalue & _BITUL(RTC_FEATURE_ALARM)) == 0)
return RTC_ALARM_DISABLED;
+ /* Check if alarm has desired granularity */
+ if (need_seconds && (param.uvalue & _BITUL(RTC_FEATURE_ALARM_RES_MINUTE)))
+ return RTC_ALARM_RES_MINUTE;
+
return RTC_ALARM_ENABLED;
}
@@ -227,9 +232,11 @@ TEST_F(rtc, alarm_alm_set) {
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
- alarm_state = get_rtc_alarm_state(self->fd);
+ alarm_state = get_rtc_alarm_state(self->fd, 1);
if (alarm_state == RTC_ALARM_DISABLED)
SKIP(return, "Skipping test since alarms are not supported.");
+ if (alarm_state == RTC_ALARM_RES_MINUTE)
+ SKIP(return, "Skipping test since alarms has only minute granularity.");
rc = ioctl(self->fd, RTC_RD_TIME, &tm);
ASSERT_NE(-1, rc);
@@ -295,9 +302,11 @@ TEST_F(rtc, alarm_wkalm_set) {
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
- alarm_state = get_rtc_alarm_state(self->fd);
+ alarm_state = get_rtc_alarm_state(self->fd, 1);
if (alarm_state == RTC_ALARM_DISABLED)
SKIP(return, "Skipping test since alarms are not supported.");
+ if (alarm_state == RTC_ALARM_RES_MINUTE)
+ SKIP(return, "Skipping test since alarms has only minute granularity.");
rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
ASSERT_NE(-1, rc);
@@ -357,7 +366,7 @@ TEST_F_TIMEOUT(rtc, alarm_alm_set_minute, 65) {
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
- alarm_state = get_rtc_alarm_state(self->fd);
+ alarm_state = get_rtc_alarm_state(self->fd, 0);
if (alarm_state == RTC_ALARM_DISABLED)
SKIP(return, "Skipping test since alarms are not supported.");
@@ -425,7 +434,7 @@ TEST_F_TIMEOUT(rtc, alarm_wkalm_set_minute, 65) {
SKIP(return, "Skipping test since %s does not exist", rtc_file);
ASSERT_NE(-1, self->fd);
- alarm_state = get_rtc_alarm_state(self->fd);
+ alarm_state = get_rtc_alarm_state(self->fd, 0);
if (alarm_state == RTC_ALARM_DISABLED)
SKIP(return, "Skipping test since alarms are not supported.");
diff --git a/tools/testing/selftests/rtc/setdate.c b/tools/testing/selftests/rtc/setdate.c
deleted file mode 100644
index b303890b3de2..000000000000
--- a/tools/testing/selftests/rtc/setdate.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Real Time Clock Driver Test
- * by: Benjamin Gaignard (benjamin.gaignard@linaro.org)
- *
- * To build
- * gcc rtctest_setdate.c -o rtctest_setdate
- */
-
-#include <stdio.h>
-#include <linux/rtc.h>
-#include <sys/ioctl.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <errno.h>
-
-static const char default_time[] = "00:00:00";
-
-int main(int argc, char **argv)
-{
- int fd, retval;
- struct rtc_time new, current;
- const char *rtc, *date;
- const char *time = default_time;
-
- switch (argc) {
- case 4:
- time = argv[3];
- /* FALLTHROUGH */
- case 3:
- date = argv[2];
- rtc = argv[1];
- break;
- default:
- fprintf(stderr, "usage: rtctest_setdate <rtcdev> <DD-MM-YYYY> [HH:MM:SS]\n");
- return 1;
- }
-
- fd = open(rtc, O_RDONLY);
- if (fd == -1) {
- perror(rtc);
- exit(errno);
- }
-
- sscanf(date, "%d-%d-%d", &new.tm_mday, &new.tm_mon, &new.tm_year);
- new.tm_mon -= 1;
- new.tm_year -= 1900;
- sscanf(time, "%d:%d:%d", &new.tm_hour, &new.tm_min, &new.tm_sec);
-
- fprintf(stderr, "Test will set RTC date/time to %d-%d-%d, %02d:%02d:%02d.\n",
- new.tm_mday, new.tm_mon + 1, new.tm_year + 1900,
- new.tm_hour, new.tm_min, new.tm_sec);
-
- /* Write the new date in RTC */
- retval = ioctl(fd, RTC_SET_TIME, &new);
- if (retval == -1) {
- perror("RTC_SET_TIME ioctl");
- close(fd);
- exit(errno);
- }
-
- /* Read back */
- retval = ioctl(fd, RTC_RD_TIME, &current);
- if (retval == -1) {
- perror("RTC_RD_TIME ioctl");
- exit(errno);
- }
-
- fprintf(stderr, "\n\nCurrent RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n",
- current.tm_mday, current.tm_mon + 1, current.tm_year + 1900,
- current.tm_hour, current.tm_min, current.tm_sec);
-
- close(fd);
- return 0;
-}
diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile
index 7817afe29005..c7781efea0f3 100644
--- a/tools/testing/selftests/ublk/Makefile
+++ b/tools/testing/selftests/ublk/Makefile
@@ -4,6 +4,8 @@ CFLAGS += -O3 -Wl,-no-as-needed -Wall -I $(top_srcdir)
LDLIBS += -lpthread -lm -luring
TEST_PROGS := test_generic_01.sh
+TEST_PROGS += test_generic_02.sh
+TEST_PROGS += test_generic_03.sh
TEST_PROGS += test_null_01.sh
TEST_PROGS += test_null_02.sh
@@ -11,8 +13,11 @@ TEST_PROGS += test_loop_01.sh
TEST_PROGS += test_loop_02.sh
TEST_PROGS += test_loop_03.sh
TEST_PROGS += test_loop_04.sh
+TEST_PROGS += test_loop_05.sh
TEST_PROGS += test_stripe_01.sh
TEST_PROGS += test_stripe_02.sh
+TEST_PROGS += test_stripe_03.sh
+TEST_PROGS += test_stripe_04.sh
TEST_PROGS += test_stress_01.sh
TEST_PROGS += test_stress_02.sh
diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c
index 05147b53c361..91c282bc7674 100644
--- a/tools/testing/selftests/ublk/kublk.c
+++ b/tools/testing/selftests/ublk/kublk.c
@@ -99,7 +99,7 @@ static int __ublk_ctrl_cmd(struct ublk_dev *dev,
static int ublk_ctrl_stop_dev(struct ublk_dev *dev)
{
struct ublk_ctrl_cmd_data data = {
- .cmd_op = UBLK_CMD_STOP_DEV,
+ .cmd_op = UBLK_U_CMD_STOP_DEV,
};
return __ublk_ctrl_cmd(dev, &data);
@@ -169,7 +169,7 @@ static int ublk_ctrl_get_params(struct ublk_dev *dev,
struct ublk_params *params)
{
struct ublk_ctrl_cmd_data data = {
- .cmd_op = UBLK_CMD_GET_PARAMS,
+ .cmd_op = UBLK_U_CMD_GET_PARAMS,
.flags = CTRL_CMD_HAS_BUF,
.addr = (__u64)params,
.len = sizeof(*params),
@@ -215,7 +215,7 @@ static void ublk_ctrl_dump(struct ublk_dev *dev)
ret = ublk_ctrl_get_params(dev, &p);
if (ret < 0) {
- ublk_err("failed to get params %m\n");
+ ublk_err("failed to get params %d %s\n", ret, strerror(-ret));
return;
}
@@ -322,7 +322,7 @@ static int ublk_queue_init(struct ublk_queue *q)
cmd_buf_size = ublk_queue_cmd_buf_sz(q);
off = UBLKSRV_CMD_BUF_OFFSET + q->q_id * ublk_queue_max_cmd_buf_sz();
- q->io_cmd_buf = (char *)mmap(0, cmd_buf_size, PROT_READ,
+ q->io_cmd_buf = mmap(0, cmd_buf_size, PROT_READ,
MAP_SHARED | MAP_POPULATE, dev->fds[0], off);
if (q->io_cmd_buf == MAP_FAILED) {
ublk_err("ublk dev %d queue %d map io_cmd_buf failed %m\n",
diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h
index f31a5c4d4143..760ff8ffb810 100644
--- a/tools/testing/selftests/ublk/kublk.h
+++ b/tools/testing/selftests/ublk/kublk.h
@@ -128,7 +128,7 @@ struct ublk_queue {
unsigned int io_inflight;
struct ublk_dev *dev;
const struct ublk_tgt_ops *tgt_ops;
- char *io_cmd_buf;
+ struct ublksrv_io_desc *io_cmd_buf;
struct io_uring ring;
struct ublk_io ios[UBLK_QUEUE_DEPTH];
#define UBLKSRV_QUEUE_STOPPING (1U << 0)
@@ -302,7 +302,7 @@ static inline void ublk_mark_io_done(struct ublk_io *io, int res)
static inline const struct ublksrv_io_desc *ublk_get_iod(const struct ublk_queue *q, int tag)
{
- return (struct ublksrv_io_desc *)&(q->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
+ return &q->io_cmd_buf[tag];
}
static inline void ublk_set_sqe_cmd_op(struct io_uring_sqe *sqe, __u32 cmd_op)
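Because io_cmd_buf is now typed as struct ublksrv_io_desc * instead of char *, the per-tag descriptor lookup becomes plain array indexing while the computed address stays the same. A small illustrative equivalence (struct definition taken from the ublk UAPI header):

#include <linux/ublk_cmd.h>	/* struct ublksrv_io_desc */
#include <assert.h>

static void check_iod_addressing(struct ublksrv_io_desc *io_cmd_buf, int tag)
{
	/* &io_cmd_buf[tag] is the same address the old byte-offset form computed */
	assert((char *)&io_cmd_buf[tag] ==
	       (char *)io_cmd_buf + tag * sizeof(struct ublksrv_io_desc));
}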
diff --git a/tools/testing/selftests/ublk/null.c b/tools/testing/selftests/ublk/null.c
index 899875ff50fe..91fec3690d4b 100644
--- a/tools/testing/selftests/ublk/null.c
+++ b/tools/testing/selftests/ublk/null.c
@@ -17,7 +17,8 @@ static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
dev->tgt.dev_size = dev_size;
dev->tgt.params = (struct ublk_params) {
- .types = UBLK_PARAM_TYPE_BASIC,
+ .types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN |
+ UBLK_PARAM_TYPE_SEGMENT,
.basic = {
.logical_bs_shift = 9,
.physical_bs_shift = 12,
@@ -26,6 +27,14 @@ static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
.max_sectors = info->max_io_buf_bytes >> 9,
.dev_sectors = dev_size >> 9,
},
+ .dma = {
+ .alignment = 4095,
+ },
+ .seg = {
+ .seg_boundary_mask = 4095,
+ .max_segment_size = 32 << 10,
+ .max_segments = 32,
+ },
};
if (info->flags & UBLK_F_SUPPORT_ZERO_COPY)
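These limits surface as the block queue attributes that the new test_generic_03.sh (further below) reads back: .dma.alignment = 4095 appears as queue/dma_alignment, .seg.max_segments = 32 as queue/max_segments, and .seg.max_segment_size = 32 << 10 as queue/max_segment_size = 32768. A trivial worked check of that last value (illustrative only):

#include <assert.h>

int main(void)
{
	unsigned int max_segment_size = 32 << 10;	/* .seg.max_segment_size above */

	/* test_generic_03.sh compares the sysfs value against the string "32768" */
	assert(max_segment_size == 32768);
	return 0;
}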
diff --git a/tools/testing/selftests/ublk/stripe.c b/tools/testing/selftests/ublk/stripe.c
index 98c564b12f3c..179731c3dd6f 100644
--- a/tools/testing/selftests/ublk/stripe.c
+++ b/tools/testing/selftests/ublk/stripe.c
@@ -111,43 +111,67 @@ static void calculate_stripe_array(const struct stripe_conf *conf,
}
}
-static inline enum io_uring_op stripe_to_uring_op(const struct ublksrv_io_desc *iod)
+static inline enum io_uring_op stripe_to_uring_op(
+ const struct ublksrv_io_desc *iod, int zc)
{
unsigned ublk_op = ublksrv_get_op(iod);
if (ublk_op == UBLK_IO_OP_READ)
- return IORING_OP_READV;
+ return zc ? IORING_OP_READV_FIXED : IORING_OP_READV;
else if (ublk_op == UBLK_IO_OP_WRITE)
- return IORING_OP_WRITEV;
+ return zc ? IORING_OP_WRITEV_FIXED : IORING_OP_WRITEV;
assert(0);
}
static int stripe_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
const struct stripe_conf *conf = get_chunk_shift(q);
- enum io_uring_op op = stripe_to_uring_op(iod);
+ int zc = !!(ublk_queue_use_zc(q) != 0);
+ enum io_uring_op op = stripe_to_uring_op(iod, zc);
struct io_uring_sqe *sqe[NR_STRIPE];
struct stripe_array *s = alloc_stripe_array(conf, iod);
struct ublk_io *io = ublk_get_io(q, tag);
- int i;
+ int i, extra = zc ? 2 : 0;
io->private_data = s;
calculate_stripe_array(conf, iod, s);
- ublk_queue_alloc_sqes(q, sqe, s->nr);
- for (i = 0; i < s->nr; i++) {
- struct stripe *t = &s->s[i];
+ ublk_queue_alloc_sqes(q, sqe, s->nr + extra);
+
+ if (zc) {
+ io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
+ sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
+ sqe[0]->user_data = build_user_data(tag,
+ ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
+ }
+
+ for (i = zc; i < s->nr + extra - zc; i++) {
+ struct stripe *t = &s->s[i - zc];
io_uring_prep_rw(op, sqe[i],
t->seq + 1,
(void *)t->vec,
t->nr_vec,
t->start << 9);
- io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
+ if (zc) {
+ sqe[i]->buf_index = tag;
+ io_uring_sqe_set_flags(sqe[i],
+ IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK);
+ } else {
+ io_uring_sqe_set_flags(sqe[i], IOSQE_FIXED_FILE);
+ }
/* bit63 marks us as tgt io */
- sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i, 1);
+ sqe[i]->user_data = build_user_data(tag, ublksrv_get_op(iod), i - zc, 1);
+ }
+ if (zc) {
+ struct io_uring_sqe *unreg = sqe[s->nr + 1];
+
+ io_uring_prep_buf_unregister(unreg, 0, tag, q->q_id, tag);
+ unreg->user_data = build_user_data(tag, ublk_cmd_op_nr(unreg->cmd_op), 0, 1);
}
- return s->nr;
+
+ /* register buffer is skip_success */
+ return s->nr + zc;
}
static int handle_flush(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
@@ -208,19 +232,27 @@ static void ublk_stripe_io_done(struct ublk_queue *q, int tag,
struct ublk_io *io = ublk_get_io(q, tag);
int res = cqe->res;
- if (res < 0) {
+ if (res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
if (!io->result)
io->result = res;
- ublk_err("%s: io failure %d tag %u\n", __func__, res, tag);
+ if (res < 0)
+ ublk_err("%s: io failure %d tag %u\n", __func__, res, tag);
}
+ /* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
+ if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
+ io->tgt_ios += 1;
+
/* fail short READ/WRITE simply */
if (op == UBLK_IO_OP_READ || op == UBLK_IO_OP_WRITE) {
unsigned seq = user_data_to_tgt_data(cqe->user_data);
struct stripe_array *s = io->private_data;
- if (res < s->s[seq].vec->iov_len)
+ if (res < s->s[seq].nr_sects << 9) {
io->result = -EIO;
+ ublk_err("%s: short rw op %u res %d exp %u tag %u\n",
+ __func__, op, res, s->s[seq].vec->iov_len, tag);
+ }
}
if (ublk_completed_tgt_io(q, tag)) {
@@ -253,7 +285,7 @@ static int ublk_stripe_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
struct stripe_conf *conf;
unsigned chunk_shift;
loff_t bytes = 0;
- int ret, i;
+ int ret, i, mul = 1;
if ((chunk_size & (chunk_size - 1)) || !chunk_size) {
ublk_err("invalid chunk size %u\n", chunk_size);
@@ -295,8 +327,11 @@ static int ublk_stripe_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
dev->tgt.dev_size = bytes;
p.basic.dev_sectors = bytes >> 9;
dev->tgt.params = p;
- dev->tgt.sq_depth = dev->dev_info.queue_depth * conf->nr_files;
- dev->tgt.cq_depth = dev->dev_info.queue_depth * conf->nr_files;
+
+ if (dev->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY)
+ mul = 2;
+ dev->tgt.sq_depth = mul * dev->dev_info.queue_depth * conf->nr_files;
+ dev->tgt.cq_depth = mul * dev->dev_info.queue_depth * conf->nr_files;
printf("%s: shift %u files %u\n", __func__, conf->shift, conf->nr_files);
diff --git a/tools/testing/selftests/ublk/test_common.sh b/tools/testing/selftests/ublk/test_common.sh
index 75f54ac6b1c4..a88b35943227 100755
--- a/tools/testing/selftests/ublk/test_common.sh
+++ b/tools/testing/selftests/ublk/test_common.sh
@@ -23,6 +23,12 @@ _get_disk_dev_t() {
echo $(( (major & 0xfff) << 20 | (minor & 0xfffff) ))
}
+_run_fio_verify_io() {
+ fio --name=verify --rw=randwrite --direct=1 --ioengine=libaio \
+ --bs=8k --iodepth=32 --verify=crc32c --do_verify=1 \
+ --verify_state_save=0 "$@" > /dev/null
+}
+
_create_backfile() {
local my_size=$1
local my_file
diff --git a/tools/testing/selftests/ublk/test_generic_02.sh b/tools/testing/selftests/ublk/test_generic_02.sh
new file mode 100755
index 000000000000..3e80121e3bf5
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_generic_02.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_02"
+ERR_CODE=0
+
+if ! _have_program bpftrace; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "null" "sequential io order for MQ"
+
+dev_id=$(_add_ublk_dev -t null -q 2)
+_check_add_dev $TID $?
+
+dev_t=$(_get_disk_dev_t "$dev_id")
+bpftrace trace/seq_io.bt "$dev_t" "W" 1 > "$UBLK_TMP" 2>&1 &
+btrace_pid=$!
+sleep 2
+
+if ! kill -0 "$btrace_pid" > /dev/null 2>&1; then
+ _cleanup_test "null"
+ exit "$UBLK_SKIP_CODE"
+fi
+
+# run fio over this ublk disk
+fio --name=write_seq \
+ --filename=/dev/ublkb"${dev_id}" \
+ --ioengine=libaio --iodepth=16 \
+ --rw=write \
+ --size=512M \
+ --direct=1 \
+ --bs=4k > /dev/null 2>&1
+ERR_CODE=$?
+kill "$btrace_pid"
+wait
+if grep -q "io_out_of_order" "$UBLK_TMP"; then
+ cat "$UBLK_TMP"
+ ERR_CODE=255
+fi
+_cleanup_test "null"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_generic_03.sh b/tools/testing/selftests/ublk/test_generic_03.sh
new file mode 100755
index 000000000000..b551aa76cb0d
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_generic_03.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="generic_03"
+ERR_CODE=0
+
+_prep_test "null" "check dma & segment limits for zero copy"
+
+dev_id=$(_add_ublk_dev -t null -z)
+_check_add_dev $TID $?
+
+sysfs_path=/sys/block/ublkb"${dev_id}"
+dma_align=$(cat "$sysfs_path"/queue/dma_alignment)
+max_segments=$(cat "$sysfs_path"/queue/max_segments)
+max_segment_size=$(cat "$sysfs_path"/queue/max_segment_size)
+if [ "$dma_align" != "4095" ]; then
+ ERR_CODE=255
+fi
+if [ "$max_segments" != "32" ]; then
+ ERR_CODE=255
+fi
+if [ "$max_segment_size" != "32768" ]; then
+ ERR_CODE=255
+fi
+_cleanup_test "null"
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_loop_01.sh b/tools/testing/selftests/ublk/test_loop_01.sh
index c882d2a08e13..1ef8b6044777 100755
--- a/tools/testing/selftests/ublk/test_loop_01.sh
+++ b/tools/testing/selftests/ublk/test_loop_01.sh
@@ -6,6 +6,10 @@
TID="loop_01"
ERR_CODE=0
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
_prep_test "loop" "write and verify test"
backfile_0=$(_create_backfile 256M)
@@ -14,15 +18,7 @@ dev_id=$(_add_ublk_dev -t loop "$backfile_0")
_check_add_dev $TID $? "${backfile_0}"
# run fio over the ublk disk
-fio --name=write_and_verify \
- --filename=/dev/ublkb"${dev_id}" \
- --ioengine=libaio --iodepth=16 \
- --rw=write \
- --size=256M \
- --direct=1 \
- --verify=crc32c \
- --do_verify=1 \
- --bs=4k > /dev/null 2>&1
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
ERR_CODE=$?
_cleanup_test "loop"
diff --git a/tools/testing/selftests/ublk/test_loop_03.sh b/tools/testing/selftests/ublk/test_loop_03.sh
index 269c96787d7d..e9ca744de8b1 100755
--- a/tools/testing/selftests/ublk/test_loop_03.sh
+++ b/tools/testing/selftests/ublk/test_loop_03.sh
@@ -6,6 +6,10 @@
TID="loop_03"
ERR_CODE=0
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
_prep_test "loop" "write and verify over zero copy"
backfile_0=$(_create_backfile 256M)
@@ -13,15 +17,7 @@ dev_id=$(_add_ublk_dev -t loop -z "$backfile_0")
_check_add_dev $TID $? "$backfile_0"
# run fio over the ublk disk
-fio --name=write_and_verify \
- --filename=/dev/ublkb"${dev_id}" \
- --ioengine=libaio --iodepth=64 \
- --rw=write \
- --size=256M \
- --direct=1 \
- --verify=crc32c \
- --do_verify=1 \
- --bs=4k > /dev/null 2>&1
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
ERR_CODE=$?
_cleanup_test "loop"
diff --git a/tools/testing/selftests/ublk/test_loop_05.sh b/tools/testing/selftests/ublk/test_loop_05.sh
new file mode 100755
index 000000000000..2e6e2e6978fc
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_loop_05.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="loop_05"
+ERR_CODE=0
+
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "loop" "write and verify test"
+
+backfile_0=$(_create_backfile 256M)
+
+dev_id=$(_add_ublk_dev -q 2 -t loop "$backfile_0")
+_check_add_dev $TID $? "${backfile_0}"
+
+# run fio over the ublk disk
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M
+ERR_CODE=$?
+
+_cleanup_test "loop"
+
+_remove_backfile "$backfile_0"
+
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/ublk/test_stress_01.sh b/tools/testing/selftests/ublk/test_stress_01.sh
index 7177f6c57bc5..a8be24532b24 100755
--- a/tools/testing/selftests/ublk/test_stress_01.sh
+++ b/tools/testing/selftests/ublk/test_stress_01.sh
@@ -27,20 +27,20 @@ ublk_io_and_remove()
_prep_test "stress" "run IO and remove device"
-ublk_io_and_remove 8G -t null
+ublk_io_and_remove 8G -t null -q 4
ERR_CODE=$?
if [ ${ERR_CODE} -ne 0 ]; then
_show_result $TID $ERR_CODE
fi
BACK_FILE=$(_create_backfile 256M)
-ublk_io_and_remove 256M -t loop "${BACK_FILE}"
+ublk_io_and_remove 256M -t loop -q 4 "${BACK_FILE}"
ERR_CODE=$?
if [ ${ERR_CODE} -ne 0 ]; then
_show_result $TID $ERR_CODE
fi
-ublk_io_and_remove 256M -t loop -z "${BACK_FILE}"
+ublk_io_and_remove 256M -t loop -q 4 -z "${BACK_FILE}"
ERR_CODE=$?
_cleanup_test "stress"
_remove_backfile "${BACK_FILE}"
diff --git a/tools/testing/selftests/ublk/test_stress_02.sh b/tools/testing/selftests/ublk/test_stress_02.sh
index 2a8e60579a06..2159e4cc8140 100755
--- a/tools/testing/selftests/ublk/test_stress_02.sh
+++ b/tools/testing/selftests/ublk/test_stress_02.sh
@@ -27,20 +27,20 @@ ublk_io_and_kill_daemon()
_prep_test "stress" "run IO and kill ublk server"
-ublk_io_and_kill_daemon 8G -t null
+ublk_io_and_kill_daemon 8G -t null -q 4
ERR_CODE=$?
if [ ${ERR_CODE} -ne 0 ]; then
_show_result $TID $ERR_CODE
fi
BACK_FILE=$(_create_backfile 256M)
-ublk_io_and_kill_daemon 256M -t loop "${BACK_FILE}"
+ublk_io_and_kill_daemon 256M -t loop -q 4 "${BACK_FILE}"
ERR_CODE=$?
if [ ${ERR_CODE} -ne 0 ]; then
_show_result $TID $ERR_CODE
fi
-ublk_io_and_kill_daemon 256M -t loop -z "${BACK_FILE}"
+ublk_io_and_kill_daemon 256M -t loop -q 4 -z "${BACK_FILE}"
ERR_CODE=$?
_cleanup_test "stress"
_remove_backfile "${BACK_FILE}"
diff --git a/tools/testing/selftests/ublk/test_stripe_01.sh b/tools/testing/selftests/ublk/test_stripe_01.sh
index c01f3dc325ab..7e387ef656ea 100755
--- a/tools/testing/selftests/ublk/test_stripe_01.sh
+++ b/tools/testing/selftests/ublk/test_stripe_01.sh
@@ -6,6 +6,10 @@
TID="stripe_01"
ERR_CODE=0
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
_prep_test "stripe" "write and verify test"
backfile_0=$(_create_backfile 256M)
@@ -15,15 +19,7 @@ dev_id=$(_add_ublk_dev -t stripe "$backfile_0" "$backfile_1")
_check_add_dev $TID $? "${backfile_0}"
# run fio over the ublk disk
-fio --name=write_and_verify \
- --filename=/dev/ublkb"${dev_id}" \
- --ioengine=libaio --iodepth=32 \
- --rw=write \
- --size=512M \
- --direct=1 \
- --verify=crc32c \
- --do_verify=1 \
- --bs=4k > /dev/null 2>&1
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M
ERR_CODE=$?
_cleanup_test "stripe"
diff --git a/tools/testing/selftests/ublk/test_stripe_03.sh b/tools/testing/selftests/ublk/test_stripe_03.sh
new file mode 100755
index 000000000000..c1b34af36145
--- /dev/null
+++ b/tools/testing/selftests/ublk/test_stripe_03.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh
+
+TID="stripe_03"
+ERR_CODE=0
+
+if ! _have_program fio; then
+ exit "$UBLK_SKIP_CODE"
+fi
+
+_prep_test "stripe" "write and verify test"
+
+backfile_0=$(_create_backfile 256M)
+backfile_1=$(_create_backfile 256M)
+
+dev_id=$(_add_ublk_dev -q 2 -t stripe "$backfile_0" "$backfile_1")
+_check_add_dev $TID $? "${backfile_0}"
+
+# run fio over the ublk disk
+_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=512M
+ERR_CODE=$?
+
+_cleanup_test "stripe"
+
+_remove_backfile "$backfile_0"
+_remove_backfile "$backfile_1"
+
+_show_result $TID $ERR_CODE
diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
index d53959e03593..94bee6e0c813 100644
--- a/tools/testing/selftests/x86/test_mremap_vdso.c
+++ b/tools/testing/selftests/x86/test_mremap_vdso.c
@@ -14,6 +14,7 @@
#include <errno.h>
#include <unistd.h>
#include <string.h>
+#include <stdbool.h>
#include <sys/mman.h>
#include <sys/auxv.h>
@@ -55,13 +56,55 @@ static int try_to_remap(void *vdso_addr, unsigned long size)
}
+#define VDSO_NAME "[vdso]"
+#define VMFLAGS "VmFlags:"
+#define MSEAL_FLAGS "sl"
+#define MAX_LINE_LEN 512
+
+bool vdso_sealed(FILE *maps)
+{
+ char line[MAX_LINE_LEN];
+ bool has_vdso = false;
+
+ while (fgets(line, sizeof(line), maps)) {
+ if (strstr(line, VDSO_NAME))
+ has_vdso = true;
+
+ if (has_vdso && !strncmp(line, VMFLAGS, strlen(VMFLAGS))) {
+ if (strstr(line, MSEAL_FLAGS))
+ return true;
+
+ return false;
+ }
+ }
+
+ return false;
+}
+
int main(int argc, char **argv, char **envp)
{
pid_t child;
+ FILE *maps;
ksft_print_header();
ksft_set_plan(1);
+ maps = fopen("/proc/self/smaps", "r");
+ if (!maps) {
+ ksft_test_result_skip(
+ "Could not open /proc/self/smaps, errno=%d\n",
+ errno);
+
+ return 0;
+ }
+
+ if (vdso_sealed(maps)) {
+ ksft_test_result_skip("vdso is sealed\n");
+ return 0;
+ }
+
+ fclose(maps);
+
child = fork();
if (child == -1)
ksft_exit_fail_msg("failed to fork (%d): %m\n", errno);